diff --git a/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs b/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs index 0f0b11222211..070fb23bca99 100644 --- a/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs +++ b/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs @@ -3,14 +3,22 @@ eclipse.preferences.version=1 org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 org.eclipse.jdt.core.compiler.compliance=1.8 org.eclipse.jdt.core.compiler.doc.comment.support=enabled +org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=error org.eclipse.jdt.core.compiler.problem.assertIdentifier=error org.eclipse.jdt.core.compiler.problem.comparingIdentical=error +org.eclipse.jdt.core.compiler.problem.discouragedReference=error org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.problem.finalParameterBound=error +org.eclipse.jdt.core.compiler.problem.finallyBlockNotCompletingNormally=error +org.eclipse.jdt.core.compiler.problem.forbiddenReference=error +org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=error +org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=error org.eclipse.jdt.core.compiler.problem.invalidJavadoc=error org.eclipse.jdt.core.compiler.problem.invalidJavadocTags=enabled org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsDeprecatedRef=disabled org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsNotVisibleRef=disabled org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsVisibility=private +org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=error org.eclipse.jdt.core.compiler.problem.missingJavadocComments=ignore org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsOverriding=disabled org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsVisibility=public @@ -20,6 +28,8 @@ org.eclipse.jdt.core.compiler.problem.missingJavadocTagsMethodTypeParameters=dis org.eclipse.jdt.core.compiler.problem.missingJavadocTagsOverriding=disabled org.eclipse.jdt.core.compiler.problem.missingJavadocTagsVisibility=public org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error +org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error +org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=error org.eclipse.jdt.core.compiler.problem.unusedImport=error org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled diff --git a/dev-tools/idea/.idea/libraries/Solr_DIH_core_library.xml b/dev-tools/idea/.idea/libraries/Solr_DIH_core_library.xml new file mode 100644 index 000000000000..d363b92ecd0e --- /dev/null +++ b/dev-tools/idea/.idea/libraries/Solr_DIH_core_library.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml b/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml index 626824763510..8240ff2c8ee7 100644 --- a/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml +++ b/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml @@ -16,9 +16,10 @@ - + + diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 1f3f30c79e40..f9cca9fdfada 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -47,6 +47,9 @@ API Changes * LUCENE-7734: FieldType's copy constructor was widened to accept any IndexableFieldType. (David Smiley) +* LUCENE-7701: Grouping collectors have been refactored, such that groups are + now defined by a GroupSelector implementation. 
(Alan Woodward) + Bug Fixes * LUCENE-7626: IndexWriter will no longer accept broken token offsets @@ -88,6 +91,35 @@ Other ======================= Lucene 6.6.0 ======================= +Bug Fixes + +* LUCENE-7777: ByteBlockPool.readBytes sometimes throws + ArrayIndexOutOfBoundsException when byte blocks larger than 32 KB + were added (Mike McCandless) + +* LUCENE-7797: The static FSDirectory.listAll(Path) method was always + returning an empty array. (Atkins Chang via Mike McCandless) + +Improvements + +* LUCENE-7782: OfflineSorter now passes the total number of items it + will write to getWriter (Mike McCandless) + +* LUCENE-7785: Move dictionary for Ukrainian analyzer to external dependency. + (Andriy Rysin via Steve Rowe, Dawid Weiss) + +* LUCENE-7801: SortedSetDocValuesReaderState now implements + Accountable so you can see how much RAM it's using (Robert Muir, + Mike McCandless) + +* LUCENE-7792: OfflineSorter can now run concurrently if you pass it + an optional ExecutorService (Dawid Weiss, Mike McCandless) + +Optimizations + +* LUCENE-7787: spatial-extras HeatmapFacetCounter will now short-circuit it's + work when Bits.MatchNoBits is passed. (David Smiley) + Other * LUCENE-7754: Inner classes should be static whenever possible. @@ -112,6 +144,9 @@ Bug Fixes * LUCENE-7749: Made LRUQueryCache delegate the scoreSupplier method. (Martin Amirault via Adrien Grand) +* LUCENE-7769: The UnifiedHighligter wasn't highlighting portions of the query + wrapped in BoostQuery or SpanBoostQuery. (David Smiley, Dmitry Malinin) + Other * LUCENE-7763: Remove outdated comment in IndexWriterConfig.setIndexSort javadocs. diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt index 1ccf56f62787..c7936a4bd7af 100644 --- a/lucene/MIGRATE.txt +++ b/lucene/MIGRATE.txt @@ -61,3 +61,16 @@ do not take deleted docs and doc values updates into account. Index-time boosts are not supported anymore. As a replacement, index-time scoring factors should be indexed in a doc value field and combined with the score at query time using FunctionScoreQuery for instance. + +## Grouping collector refactoring (LUCENE-7701) + +Groups are now defined by GroupSelector classes, making it easier to define new +types of groups. Rather than having term or function specific collection +classes, FirstPassGroupingCollector, AllGroupsCollector and +AllGroupHeadsCollector are now concrete classes taking a GroupSelector. + +SecondPassGroupingCollector is no longer specifically aimed at +collecting TopDocs for each group, but instead takes a GroupReducer that will +perform any type of reduction on the top groups collected on a first-pass. To +reproduce the old behaviour of SecondPassGroupingCollector, you should instead +use TopGroupsCollector. diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java index 75e991fde8ec..c01e26380423 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java @@ -106,7 +106,7 @@ public boolean incrementToken() throws IOException { saveTermBuffer(); return true; } else if (!input.incrementToken()) { - return false; + return false; } /* We build n-grams before and after stopwords. 
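[Editor's aside, not part of the patch] For readers of the LUCENE-7792 entry above, the sketch below shows how the widened OfflineSorter constructor (see the lucene/core OfflineSorter.java hunks later in this patch) might be driven with an ExecutorService so that partition sorting and merging run concurrently. It is a minimal illustration under stated assumptions: the class name, directory path, temp-file prefix, pool size, and sample byte sequences are invented for the example and are not taken from this change.

```java
// Minimal sketch (not part of the patch): exercising the new concurrent OfflineSorter.
// Assumptions: path, prefix, pool size, and sample values below are illustrative only.
import java.nio.file.Paths;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.OfflineSorter;

public class ConcurrentOfflineSortSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(4);
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/offline-sort-demo"))) {

      // Write an unsorted temp file in the length-prefixed format OfflineSorter reads,
      // finishing with the codec footer that sort() verifies.
      String inputName;
      try (IndexOutput out = dir.createTempOutput("demo", "unsorted", IOContext.DEFAULT)) {
        OfflineSorter.ByteSequencesWriter writer = new OfflineSorter.ByteSequencesWriter(out);
        writer.write(new BytesRef("banana"));
        writer.write(new BytesRef("apple"));
        writer.write(new BytesRef("cherry"));
        CodecUtil.writeFooter(out);
        inputName = out.getName();
      }

      // The last two arguments are new in this patch: a non-null ExecutorService lets
      // partitions be sorted and merged concurrently, and maxPartitionsInRAM bounds how
      // many in-memory partitions may exist at once (so peak RAM is roughly
      // maxPartitionsInRAM * ramBufferSize). Passing (null, 0), as BKDWriter does in this
      // patch, keeps the old single-threaded behaviour.
      OfflineSorter sorter = new OfflineSorter(dir, "demo", OfflineSorter.DEFAULT_COMPARATOR,
          OfflineSorter.BufferSize.automatic(), OfflineSorter.MAX_TEMPFILES, /*valueLength*/ -1,
          exec, /*maxPartitionsInRAM*/ 4);

      String sortedName = sorter.sort(inputName);
      System.out.println("sorted output temp file: " + sortedName);
    } finally {
      exec.shutdown();
    }
  }
}
```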
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java index fe71b7e83f1f..282c2e7ea97f 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java @@ -66,10 +66,11 @@ public UAX29URLEmailAnalyzer(Reader stopwords) throws IOException { } /** - * Set maximum allowed token length. If a token is seen - * that exceeds this length then it is discarded. This - * setting only takes effect the next time tokenStream or - * tokenStream is called. + * Set the max allowed token length. Tokens larger than this will be chopped + * up at this token length and emitted as multiple tokens. If you need to + * skip such large tokens, you could increase this max length, and then + * use {@code LengthFilter} to remove long tokens. The default is + * {@link UAX29URLEmailAnalyzer#DEFAULT_MAX_TOKEN_LENGTH}. */ public void setMaxTokenLength(int length) { maxTokenLength = length; @@ -92,6 +93,8 @@ protected TokenStreamComponents createComponents(final String fieldName) { return new TokenStreamComponents(src, tok) { @Override protected void setReader(final Reader reader) { + // So that if maxTokenLength was changed, the change takes + // effect next time tokenStream is called: src.setMaxTokenLength(UAX29URLEmailAnalyzer.this.maxTokenLength); super.setReader(reader); } diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java index d2b02e437848..842ae510fc22 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java @@ -72,19 +72,34 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { "", "", }; + + /** Absolute maximum sized token */ + public static final int MAX_TOKEN_LENGTH_LIMIT = 1024 * 1024; private int skippedPositions; private int maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH; - /** Set the max allowed token length. Any token longer - * than this is skipped. */ + /** + * Set the max allowed token length. Tokens larger than this will be chopped + * up at this token length and emitted as multiple tokens. If you need to + * skip such large tokens, you could increase this max length, and then + * use {@code LengthFilter} to remove long tokens. The default is + * {@link UAX29URLEmailAnalyzer#DEFAULT_MAX_TOKEN_LENGTH}. + * + * @throws IllegalArgumentException if the given length is outside of the + * range [1, {@value #MAX_TOKEN_LENGTH_LIMIT}]. 
+ */ public void setMaxTokenLength(int length) { if (length < 1) { throw new IllegalArgumentException("maxTokenLength must be greater than zero"); + } else if (length > MAX_TOKEN_LENGTH_LIMIT) { + throw new IllegalArgumentException("maxTokenLength may not exceed " + MAX_TOKEN_LENGTH_LIMIT); + } + if (length != maxTokenLength) { + this.maxTokenLength = length; + scanner.setBufferSize(length); } - this.maxTokenLength = length; - scanner.setBufferSize(Math.min(length, 1024 * 1024)); // limit buffer size to 1M chars } /** @see #setMaxTokenLength */ diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailAnalyzer.java index 14a5165e6bb7..b9321784dd07 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailAnalyzer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailAnalyzer.java @@ -357,4 +357,27 @@ public void testNoSchemeURLs() throws Exception { public void testRandomStrings() throws Exception { checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER); } + + public void testMaxTokenLengthDefault() throws Exception { + + StringBuilder bToken = new StringBuilder(); + // exact max length: + for(int i=0;i + diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/uk/UkrainianMorfologikAnalyzer.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/uk/UkrainianMorfologikAnalyzer.java index f9d3b2bd8e4a..6955fe334fc1 100644 --- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/uk/UkrainianMorfologikAnalyzer.java +++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/uk/UkrainianMorfologikAnalyzer.java @@ -107,11 +107,17 @@ public UkrainianMorfologikAnalyzer(CharArraySet stopwords, CharArraySet stemExcl @Override protected Reader initReader(String fieldName, Reader reader) { NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder(); + // different apostrophes builder.add("\u2019", "'"); + builder.add("\u2018", "'"); builder.add("\u02BC", "'"); + builder.add("`", "'"); + builder.add("´", "'"); + // ignored characters builder.add("\u0301", ""); - NormalizeCharMap normMap = builder.build(); + builder.add("\u00AD", ""); + NormalizeCharMap normMap = builder.build(); reader = new MappingCharFilter(normMap, reader); return reader; } @@ -145,7 +151,7 @@ protected TokenStreamComponents createComponents(String fieldName) { private static Dictionary getDictionary() { try { - return Dictionary.read(UkrainianMorfologikAnalyzer.class.getResource("ukrainian.dict")); + return Dictionary.read(UkrainianMorfologikAnalyzer.class.getClassLoader().getResource("ua/net/nlp/ukrainian.dict")); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/lucene/analysis/morfologik/src/resources/org/apache/lucene/analysis/uk/ukrainian.dict b/lucene/analysis/morfologik/src/resources/org/apache/lucene/analysis/uk/ukrainian.dict deleted file mode 100644 index 246897061aa7..000000000000 Binary files a/lucene/analysis/morfologik/src/resources/org/apache/lucene/analysis/uk/ukrainian.dict and /dev/null differ diff --git a/lucene/analysis/morfologik/src/resources/org/apache/lucene/analysis/uk/ukrainian.info b/lucene/analysis/morfologik/src/resources/org/apache/lucene/analysis/uk/ukrainian.info deleted file mode 100644 index 2c69f4b53c73..000000000000 --- 
a/lucene/analysis/morfologik/src/resources/org/apache/lucene/analysis/uk/ukrainian.info +++ /dev/null @@ -1,10 +0,0 @@ -# -# Dictionary properties. -# - -fsa.dict.separator=+ -fsa.dict.encoding=cp1251 - -fsa.dict.encoder=SUFFIX - -fsa.dict.speller.ignore-diacritics=false diff --git a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/uk/TestUkrainianAnalyzer.java b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/uk/TestUkrainianAnalyzer.java index a38fc63e873f..15b247d5af70 100644 --- a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/uk/TestUkrainianAnalyzer.java +++ b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/uk/TestUkrainianAnalyzer.java @@ -44,22 +44,22 @@ public void testReusableTokenStream() throws Exception { public void testSpecialCharsTokenStream() throws Exception { Analyzer a = new UkrainianMorfologikAnalyzer(); - assertAnalyzesTo(a, "Ця пʼєса, у сво́ю чергу рухається.", - new String[] { "п'єса", "черга", "рухатися" }); + assertAnalyzesTo(a, "м'яса м'я\u0301са м\u02BCяса м\u2019яса м\u2018яса м`яса", + new String[] { "м'ясо", "м'ясо", "м'ясо", "м'ясо", "м'ясо", "м'ясо"}); a.close(); } public void testCapsTokenStream() throws Exception { Analyzer a = new UkrainianMorfologikAnalyzer(); - assertAnalyzesTo(a, "Цей Чайковський і Ґете.", - new String[] { "чайковський", "ґете" }); + assertAnalyzesTo(a, "Цих Чайковського і Ґете.", + new String[] { "Чайковське", "Чайковський", "Ґете" }); a.close(); } public void testSampleSentence() throws Exception { Analyzer a = new UkrainianMorfologikAnalyzer(); assertAnalyzesTo(a, "Це — проект генерування словника з тегами частин мови для української мови.", - new String[] { "проект", "генерування", "словник", "тег", "частина", "мова", "українська", "український", "мова" }); + new String[] { "проект", "генерування", "словник", "тег", "частина", "мова", "українська", "український", "Українська", "мова" }); a.close(); } diff --git a/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java index 77f04164cc3a..f0391f4471d2 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java @@ -38,7 +38,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.WildcardQuery; -import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.util.BytesRef; @@ -86,7 +86,7 @@ public class KNearestNeighborClassifier implements Classifier { * @param indexReader the reader on the index to be used for classification * @param analyzer an {@link Analyzer} used to analyze unseen text * @param similarity the {@link Similarity} to be used by the underlying {@link IndexSearcher} or {@code null} - * (defaults to {@link org.apache.lucene.search.similarities.ClassicSimilarity}) + * (defaults to {@link org.apache.lucene.search.similarities.BM25Similarity}) * @param query a {@link Query} to eventually filter the docs used for training the classifier, or {@code null} * if all the indexed docs should be used * @param k the no. 
of docs to select in the MLT results to find the nearest neighbor @@ -106,7 +106,7 @@ public KNearestNeighborClassifier(IndexReader indexReader, Similarity similarity if (similarity != null) { this.indexSearcher.setSimilarity(similarity); } else { - this.indexSearcher.setSimilarity(new ClassicSimilarity()); + this.indexSearcher.setSimilarity(new BM25Similarity()); } if (minDocsFreq > 0) { mlt.setMinDocFreq(minDocsFreq); @@ -124,7 +124,13 @@ public KNearestNeighborClassifier(IndexReader indexReader, Similarity similarity */ @Override public ClassificationResult assignClass(String text) throws IOException { - TopDocs knnResults = knnSearch(text); + return classifyFromTopDocs(knnSearch(text)); + } + + /** + * TODO + */ + protected ClassificationResult classifyFromTopDocs(TopDocs knnResults) throws IOException { List> assignedClasses = buildListFromTopDocs(knnResults); ClassificationResult assignedClass = null; double maxscore = -Double.MAX_VALUE; diff --git a/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java index e01090a9cac3..88d41fcd8d2c 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifier.java @@ -56,7 +56,7 @@ public class KNearestNeighborDocumentClassifier extends KNearestNeighborClassifi * * @param indexReader the reader on the index to be used for classification * @param similarity the {@link Similarity} to be used by the underlying {@link IndexSearcher} or {@code null} - * (defaults to {@link org.apache.lucene.search.similarities.ClassicSimilarity}) + * (defaults to {@link org.apache.lucene.search.similarities.BM25Similarity}) * @param query a {@link org.apache.lucene.search.Query} to eventually filter the docs used for training the classifier, or {@code null} * if all the indexed docs should be used * @param k the no. 
of docs to select in the MLT results to find the nearest neighbor @@ -77,17 +77,7 @@ public KNearestNeighborDocumentClassifier(IndexReader indexReader, Similarity si */ @Override public ClassificationResult assignClass(Document document) throws IOException { - TopDocs knnResults = knnSearch(document); - List> assignedClasses = buildListFromTopDocs(knnResults); - ClassificationResult assignedClass = null; - double maxscore = -Double.MAX_VALUE; - for (ClassificationResult cl : assignedClasses) { - if (cl.getScore() > maxscore) { - assignedClass = cl; - maxscore = cl.getScore(); - } - } - return assignedClass; + return classifyFromTopDocs(knnSearch(document)); } /** diff --git a/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java b/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java index 331a74b70427..6c8f7fde833b 100644 --- a/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java +++ b/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java @@ -88,8 +88,9 @@ public void tearDown() throws Exception { protected ClassificationResult checkCorrectClassification(Classifier classifier, String inputDoc, T expectedResult) throws Exception { ClassificationResult classificationResult = classifier.assignClass(inputDoc); - assertNotNull(classificationResult.getAssignedClass()); - assertEquals("got an assigned class of " + classificationResult.getAssignedClass(), expectedResult, classificationResult.getAssignedClass()); + T assignedClass = classificationResult.getAssignedClass(); + assertNotNull(assignedClass); + assertEquals("got an assigned class of " + assignedClass, expectedResult instanceof BytesRef ? ((BytesRef) expectedResult).utf8ToString() : expectedResult, assignedClass instanceof BytesRef ? 
((BytesRef) assignedClass).utf8ToString() : assignedClass); double score = classificationResult.getScore(); assertTrue("score should be between 0 and 1, got:" + score, score <= 1 && score >= 0); return classificationResult; diff --git a/lucene/classification/src/test/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifierTest.java index 8c885fb9c72b..a323724e53d2 100644 --- a/lucene/classification/src/test/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifierTest.java +++ b/lucene/classification/src/test/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifierTest.java @@ -33,8 +33,9 @@ public void testBasicDocumentClassification() throws Exception { try { Document videoGameDocument = getVideoGameDocument(); Document batmanDocument = getBatmanDocument(); - checkCorrectDocumentClassification(new KNearestNeighborDocumentClassifier(indexReader,null, null, 1, 1, 1, categoryFieldName, field2analyzer, new String[]{textFieldName, titleFieldName, authorFieldName}), videoGameDocument, VIDEOGAME_RESULT); - checkCorrectDocumentClassification(new KNearestNeighborDocumentClassifier(indexReader,null, null, 1, 1, 1, categoryFieldName, field2analyzer, new String[]{textFieldName, titleFieldName, authorFieldName}), batmanDocument, BATMAN_RESULT); + KNearestNeighborDocumentClassifier classifier = new KNearestNeighborDocumentClassifier(indexReader, null, null, 1, 4, 1, categoryFieldName, field2analyzer, new String[]{textFieldName, titleFieldName, authorFieldName}); + checkCorrectDocumentClassification(classifier, videoGameDocument, VIDEOGAME_RESULT); + checkCorrectDocumentClassification(classifier, batmanDocument, BATMAN_RESULT); // considering only the text we have wrong classification because the text was ambiguos on purpose checkCorrectDocumentClassification(new KNearestNeighborDocumentClassifier(indexReader,null, null, 1, 1, 1, categoryFieldName, field2analyzer, new String[]{textFieldName}), videoGameDocument, BATMAN_RESULT); checkCorrectDocumentClassification(new KNearestNeighborDocumentClassifier(indexReader,null, null, 1, 1, 1, categoryFieldName, field2analyzer, new String[]{textFieldName}), batmanDocument, VIDEOGAME_RESULT); @@ -51,9 +52,10 @@ public void testBasicDocumentClassificationScore() throws Exception { try { Document videoGameDocument = getVideoGameDocument(); Document batmanDocument = getBatmanDocument(); - double score1 = checkCorrectDocumentClassification(new KNearestNeighborDocumentClassifier(indexReader,null, null, 1, 1, 1, categoryFieldName, field2analyzer, new String[]{textFieldName, titleFieldName, authorFieldName}), videoGameDocument, VIDEOGAME_RESULT); + KNearestNeighborDocumentClassifier classifier = new KNearestNeighborDocumentClassifier(indexReader, null, null, 1, 4, 1, categoryFieldName, field2analyzer, new String[]{textFieldName, titleFieldName, authorFieldName}); + double score1 = checkCorrectDocumentClassification(classifier, videoGameDocument, VIDEOGAME_RESULT); assertEquals(1.0,score1,0); - double score2 = checkCorrectDocumentClassification(new KNearestNeighborDocumentClassifier(indexReader,null, null, 1, 1, 1, categoryFieldName, field2analyzer, new String[]{textFieldName, titleFieldName, authorFieldName}), batmanDocument, BATMAN_RESULT); + double score2 = checkCorrectDocumentClassification(classifier, batmanDocument, BATMAN_RESULT); assertEquals(1.0,score2,0); // considering only the text we 
have wrong classification because the text was ambiguos on purpose double score3 = checkCorrectDocumentClassification(new KNearestNeighborDocumentClassifier(indexReader,null, null, 1, 1, 1, categoryFieldName, field2analyzer, new String[]{textFieldName}), videoGameDocument, BATMAN_RESULT); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java index d7674edf3694..dd89537550da 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java @@ -877,11 +877,11 @@ protected int byteAt(BytesRef ref, int i) { }; } - OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix + "_bkd" + dim, cmp, offlineSorterBufferMB, offlineSorterMaxTempFiles, bytesPerDoc) { + OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix + "_bkd" + dim, cmp, offlineSorterBufferMB, offlineSorterMaxTempFiles, bytesPerDoc, null, 0) { /** We write/read fixed-byte-width file that {@link OfflinePointReader} can read. */ @Override - protected ByteSequencesWriter getWriter(IndexOutput out) { + protected ByteSequencesWriter getWriter(IndexOutput out, long count) { return new ByteSequencesWriter(out) { @Override public void write(byte[] bytes, int off, int len) throws IOException { diff --git a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java index fb57573fb4e0..8afffd8ef21f 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java @@ -81,10 +81,11 @@ public StandardAnalyzer(Reader stopwords) throws IOException { } /** - * Set maximum allowed token length. If a token is seen - * that exceeds this length then it is discarded. This - * setting only takes effect the next time tokenStream or - * tokenStream is called. + * Set the max allowed token length. Tokens larger than this will be chopped + * up at this token length and emitted as multiple tokens. If you need to + * skip such large tokens, you could increase this max length, and then + * use {@code LengthFilter} to remove long tokens. The default is + * {@link StandardAnalyzer#DEFAULT_MAX_TOKEN_LENGTH}. */ public void setMaxTokenLength(int length) { maxTokenLength = length; @@ -107,6 +108,8 @@ protected TokenStreamComponents createComponents(final String fieldName) { return new TokenStreamComponents(src, tok) { @Override protected void setReader(final Reader reader) { + // So that if maxTokenLength was changed, the change takes + // effect next time tokenStream is called: src.setMaxTokenLength(StandardAnalyzer.this.maxTokenLength); super.setReader(reader); } diff --git a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java index 5b8fc753bedd..ed52f0344050 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java @@ -105,7 +105,11 @@ public final class StandardTokenizer extends Tokenizer { private int maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH; /** - * Set the max allowed token length. No tokens longer than this are emitted. 
+ * Set the max allowed token length. Tokens larger than this will be chopped + * up at this token length and emitted as multiple tokens. If you need to + * skip such large tokens, you could increase this max length, and then + * use {@code LengthFilter} to remove long tokens. The default is + * {@link StandardAnalyzer#DEFAULT_MAX_TOKEN_LENGTH}. * * @throws IllegalArgumentException if the given length is outside of the * range [1, {@value #MAX_TOKEN_LENGTH_LIMIT}]. diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java index 9da1e09530de..e5aae4f09293 100644 --- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java +++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java @@ -456,7 +456,7 @@ private ApplyDeletesResult closeSegmentStates(IndexWriter.ReaderPool pool, Segme try { segStates[j].finish(pool); } catch (Throwable th) { - if (firstExc != null) { + if (firstExc == null) { firstExc = th; } } diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java index 88dd6a1b9d93..3cd796b1beee 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java @@ -1031,7 +1031,7 @@ public boolean advanceExact(int targetDocID) throws IOException { } @Override - public int ordValue() { + public int ordValue() throws IOException { return (int) mapping.getGlobalOrds(nextLeaf-1).get(currentValues.ordValue()); } diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java b/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java index ce2050f76de2..4838799097aa 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java @@ -85,7 +85,7 @@ synchronized void decRef(List dvProducersGens) throws IOException { try { dvp.decRef(); } catch (Throwable th) { - if (t != null) { + if (t == null) { t = th; } } diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java index ccbcdf934e48..930340c70297 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java @@ -294,7 +294,7 @@ void notifyReaderClosedListeners(Throwable th) throws IOException { synchronized(readerClosedListeners) { for(ClosedListener listener : readerClosedListeners) { try { - listener.onClose(cacheHelper.getKey()); + listener.onClose(readerCacheHelper.getKey()); } catch (Throwable t) { if (th == null) { th = t; @@ -307,7 +307,7 @@ void notifyReaderClosedListeners(Throwable th) throws IOException { } } - private final IndexReader.CacheHelper cacheHelper = new IndexReader.CacheHelper() { + private final IndexReader.CacheHelper readerCacheHelper = new IndexReader.CacheHelper() { private final IndexReader.CacheKey cacheKey = new IndexReader.CacheKey(); @Override @@ -317,18 +317,35 @@ public CacheKey getKey() { @Override public void addClosedListener(ClosedListener listener) { + ensureOpen(); readerClosedListeners.add(listener); } }; @Override public CacheHelper getReaderCacheHelper() { - return cacheHelper; + return readerCacheHelper; } + /** Wrap the cache helper of the core to add ensureOpen() calls that make + * sure users do not register closed 
listeners on closed indices. */ + private final IndexReader.CacheHelper coreCacheHelper = new IndexReader.CacheHelper() { + + @Override + public CacheKey getKey() { + return core.getCacheHelper().getKey(); + } + + @Override + public void addClosedListener(ClosedListener listener) { + ensureOpen(); + core.getCacheHelper().addClosedListener(listener); + } + }; + @Override public CacheHelper getCoreCacheHelper() { - return core.getCacheHelper(); + return coreCacheHelper; } @Override diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedDocValues.java b/lucene/core/src/java/org/apache/lucene/index/SortedDocValues.java index 087e4871ed45..bd234159179a 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SortedDocValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/SortedDocValues.java @@ -46,7 +46,7 @@ protected SortedDocValues() {} * @return ordinal for the document: this is dense, starts at 0, then * increments by 1 for the next value in sorted order. */ - public abstract int ordValue(); + public abstract int ordValue() throws IOException; /** Retrieves the value for the specified ordinal. The returned * {@link BytesRef} may be re-used across calls to {@link #lookupOrd(int)} diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java index f0e7e9848251..bedf17ed7f40 100644 --- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java @@ -484,7 +484,8 @@ public CacheKey getKey() { @Override public void addClosedListener(ClosedListener listener) { - readerClosedListeners.add(listener); + ensureOpen(); + readerClosedListeners.add(listener); } }; diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java index 82162015d0f7..3fe0e9333491 100644 --- a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java +++ b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java @@ -883,14 +883,6 @@ protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String f return DocValues.getBinary(context.reader(), field); } - /** Check whether the given value represents null. This can be - * useful if the {@link BinaryDocValues} returned by {@link #getBinaryDocValues} - * use a special value as a sentinel. - *
NOTE: The null value can only be an EMPTY {@link BytesRef}. */ - protected boolean isNull(int doc, BytesRef term) throws IOException { - return getValueForDoc(doc) == null; - } - @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { docTerms = getBinaryDocValues(context, field); diff --git a/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java b/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java index 1ec322fe5d9d..97589c430dee 100644 --- a/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java +++ b/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java @@ -320,11 +320,9 @@ public boolean needsScores() { * trackDocScores to true as well. * @return a {@link TopFieldCollector} instance which will sort the results by * the sort criteria. - * @throws IOException if there is a low-level I/O error */ public static TopFieldCollector create(Sort sort, int numHits, - boolean fillFields, boolean trackDocScores, boolean trackMaxScore) - throws IOException { + boolean fillFields, boolean trackDocScores, boolean trackMaxScore) { return create(sort, numHits, null, fillFields, trackDocScores, trackMaxScore); } diff --git a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java index 50b52ceb07ac..158a2e230343 100644 --- a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java @@ -215,7 +215,7 @@ private static String[] listAll(Path dir, Set skipNames) throws IOExcept try (DirectoryStream stream = Files.newDirectoryStream(dir)) { for (Path path : stream) { String name = path.getFileName().toString(); - if (skipNames != null && skipNames.contains(name) == false) { + if (skipNames == null || skipNames.contains(name) == false) { entries.add(name); } } diff --git a/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java b/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java index 1b71440ae9ce..af8e19583bb0 100644 --- a/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java +++ b/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java @@ -324,28 +324,25 @@ public void setBytesRef(BytesRef term, int textStart) { * the current position. */ public void append(final BytesRef bytes) { - int length = bytes.length; - if (length == 0) { - return; - } + int bytesLeft = bytes.length; int offset = bytes.offset; - int overflow = (length + byteUpto) - BYTE_BLOCK_SIZE; - do { - if (overflow <= 0) { - System.arraycopy(bytes.bytes, offset, buffer, byteUpto, length); - byteUpto += length; + while (bytesLeft > 0) { + int bufferLeft = BYTE_BLOCK_SIZE - byteUpto; + if (bytesLeft < bufferLeft) { + // fits within current buffer + System.arraycopy(bytes.bytes, offset, buffer, byteUpto, bytesLeft); + byteUpto += bytesLeft; break; } else { - final int bytesToCopy = length-overflow; - if (bytesToCopy > 0) { - System.arraycopy(bytes.bytes, offset, buffer, byteUpto, bytesToCopy); - offset += bytesToCopy; - length -= bytesToCopy; + // fill up this buffer and move to next one + if (bufferLeft > 0) { + System.arraycopy(bytes.bytes, offset, buffer, byteUpto, bufferLeft); } nextBuffer(); - overflow = overflow - BYTE_BLOCK_SIZE; + bytesLeft -= bufferLeft; + offset += bufferLeft; } - } while(true); + } } /** @@ -353,30 +350,18 @@ public void append(final BytesRef bytes) { * length into the given byte array at offset off. *
Note: this method allows to copy across block boundaries.
*/ - public void readBytes(final long offset, final byte bytes[], final int off, final int length) { - if (length == 0) { - return; - } - int bytesOffset = off; - int bytesLength = length; + public void readBytes(final long offset, final byte bytes[], int bytesOffset, int bytesLength) { + int bytesLeft = bytesLength; int bufferIndex = (int) (offset >> BYTE_BLOCK_SHIFT); - byte[] buffer = buffers[bufferIndex]; int pos = (int) (offset & BYTE_BLOCK_MASK); - int overflow = (pos + length) - BYTE_BLOCK_SIZE; - do { - if (overflow <= 0) { - System.arraycopy(buffer, pos, bytes, bytesOffset, bytesLength); - break; - } else { - final int bytesToCopy = length - overflow; - System.arraycopy(buffer, pos, bytes, bytesOffset, bytesToCopy); - pos = 0; - bytesLength -= bytesToCopy; - bytesOffset += bytesToCopy; - buffer = buffers[++bufferIndex]; - overflow = overflow - BYTE_BLOCK_SIZE; - } - } while (true); + while (bytesLeft > 0) { + byte[] buffer = buffers[bufferIndex++]; + int chunk = Math.min(bytesLeft, BYTE_BLOCK_SIZE - pos); + System.arraycopy(buffer, pos, bytes, bytesOffset, chunk); + bytesOffset += chunk; + bytesLeft -= chunk; + pos = 0; + } } /** diff --git a/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java b/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java index fa223205a7ed..0bd306633dcc 100644 --- a/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java +++ b/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java @@ -24,6 +24,12 @@ import java.util.Comparator; import java.util.List; import java.util.Locale; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicLong; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.ChecksumIndexInput; @@ -72,6 +78,9 @@ public class OfflineSorter { private final int valueLength; private final String tempFileNamePrefix; + private final ExecutorService exec; + private final Semaphore partitionsInRAM; + /** * A bit more descriptive unit for constructors. * @@ -144,13 +153,13 @@ public class SortInfo { /** number of lines of data read */ public int lineCount; /** time spent merging sorted partitions (in milliseconds) */ - public long mergeTime; + public final AtomicLong mergeTimeMS = new AtomicLong(); /** time spent sorting data (in milliseconds) */ - public long sortTime; + public final AtomicLong sortTimeMS = new AtomicLong(); /** total time spent (in milliseconds) */ - public long totalTime; + public long totalTimeMS; /** time spent in i/o read (in milliseconds) */ - public long readTime; + public long readTimeMS; /** read buffer size (in bytes) */ public final long bufferSize = ramBufferSize.bytes; @@ -160,17 +169,15 @@ public SortInfo() {} @Override public String toString() { return String.format(Locale.ROOT, - "time=%.2f sec. total (%.2f reading, %.2f sorting, %.2f merging), lines=%d, temp files=%d, merges=%d, soft ram limit=%.2f MB", - totalTime / 1000.0d, readTime / 1000.0d, sortTime / 1000.0d, mergeTime / 1000.0d, - lineCount, tempMergeFiles, mergeRounds, - (double) bufferSize / MB); + "time=%.2f sec. 
total (%.2f reading, %.2f sorting, %.2f merging), lines=%d, temp files=%d, merges=%d, soft ram limit=%.2f MB", + totalTimeMS / 1000.0d, readTimeMS / 1000.0d, sortTimeMS.get() / 1000.0d, mergeTimeMS.get() / 1000.0d, + lineCount, tempMergeFiles, mergeRounds, + (double) bufferSize / MB); } } private final BufferSize ramBufferSize; - private final Counter bufferBytesUsed = Counter.newCounter(); - private final SortableBytesRefArray buffer; SortInfo sortInfo; private int maxTempFiles; private final Comparator comparator; @@ -184,7 +191,7 @@ public String toString() { * @see BufferSize#automatic() */ public OfflineSorter(Directory dir, String tempFileNamePrefix) throws IOException { - this(dir, tempFileNamePrefix, DEFAULT_COMPARATOR, BufferSize.automatic(), MAX_TEMPFILES, -1); + this(dir, tempFileNamePrefix, DEFAULT_COMPARATOR, BufferSize.automatic(), MAX_TEMPFILES, -1, null, 0); } /** @@ -193,14 +200,30 @@ public OfflineSorter(Directory dir, String tempFileNamePrefix) throws IOExceptio * @see BufferSize#automatic() */ public OfflineSorter(Directory dir, String tempFileNamePrefix, Comparator comparator) throws IOException { - this(dir, tempFileNamePrefix, comparator, BufferSize.automatic(), MAX_TEMPFILES, -1); + this(dir, tempFileNamePrefix, comparator, BufferSize.automatic(), MAX_TEMPFILES, -1, null, 0); } /** * All-details constructor. If {@code valueLength} is -1 (the default), the length of each value differs; otherwise, - * all values have the specified length. + * all values have the specified length. If you pass a non-null {@code ExecutorService} then it will be + * used to run sorting operations that can be run concurrently, and maxPartitionsInRAM is the maximum + * concurrent in-memory partitions. Thus the maximum possible RAM used by this class while sorting is + * {@code maxPartitionsInRAM * ramBufferSize}. */ - public OfflineSorter(Directory dir, String tempFileNamePrefix, Comparator comparator, BufferSize ramBufferSize, int maxTempfiles, int valueLength) { + public OfflineSorter(Directory dir, String tempFileNamePrefix, Comparator comparator, + BufferSize ramBufferSize, int maxTempfiles, int valueLength, ExecutorService exec, + int maxPartitionsInRAM) { + if (exec != null) { + this.exec = exec; + if (maxPartitionsInRAM <= 0) { + throw new IllegalArgumentException("maxPartitionsInRAM must be > 0; got " + maxPartitionsInRAM); + } + } else { + this.exec = new SameThreadExecutorService(); + maxPartitionsInRAM = 1; + } + this.partitionsInRAM = new Semaphore(maxPartitionsInRAM); + if (ramBufferSize.bytes < ABSOLUTE_MIN_SORT_BUFFER_SIZE) { throw new IllegalArgumentException(MIN_BUFFER_SIZE_MSG + ": " + ramBufferSize.bytes); } @@ -208,14 +231,11 @@ public OfflineSorter(Directory dir, String tempFileNamePrefix, Comparator= 2"); } - if (valueLength == -1) { - buffer = new BytesRefArray(bufferBytesUsed); - } else { - if (valueLength == 0 || valueLength > Short.MAX_VALUE) { - throw new IllegalArgumentException("valueLength must be 1 .. " + Short.MAX_VALUE + "; got: " + valueLength); - } - buffer = new FixedLengthBytesRefArray(valueLength); + + if (valueLength != -1 && (valueLength == 0 || valueLength > Short.MAX_VALUE)) { + throw new IllegalArgumentException("valueLength must be 1 .. 
" + Short.MAX_VALUE + "; got: " + valueLength); } + this.valueLength = valueLength; this.ramBufferSize = ramBufferSize; this.maxTempFiles = maxTempfiles; @@ -240,9 +260,9 @@ public String getTempFileNamePrefix() { public String sort(String inputFileName) throws IOException { sortInfo = new SortInfo(); - sortInfo.totalTime = System.currentTimeMillis(); + long startMS = System.currentTimeMillis(); - List segments = new ArrayList<>(); + List> segments = new ArrayList<>(); int[] levelCounts = new int[1]; // So we can remove any partially written temp files on exception: @@ -250,11 +270,21 @@ public String sort(String inputFileName) throws IOException { boolean success = false; try (ByteSequencesReader is = getReader(dir.openChecksumInput(inputFileName, IOContext.READONCE), inputFileName)) { - int lineCount; - while ((lineCount = readPartition(is)) > 0) { - segments.add(sortPartition(trackingDir)); + while (true) { + Partition part = readPartition(is); + if (part.count == 0) { + if (partitionsInRAM != null) { + partitionsInRAM.release(); + } + assert part.exhausted; + break; + } + + Callable job = new SortPartitionTask(trackingDir, part); + + segments.add(exec.submit(job)); sortInfo.tempMergeFiles++; - sortInfo.lineCount += lineCount; + sortInfo.lineCount += part.count; levelCounts[0]++; // Handle intermediate merges; we need a while loop to "cascade" the merge when necessary: @@ -268,6 +298,10 @@ public String sort(String inputFileName) throws IOException { levelCounts[mergeLevel] = 0; mergeLevel++; } + + if (part.exhausted) { + break; + } } // TODO: we shouldn't have to do this? Can't we return a merged reader to @@ -286,13 +320,13 @@ public String sort(String inputFileName) throws IOException { result = out.getName(); } } else { - result = segments.get(0); + result = getPartition(segments.get(0)).fileName; } // We should be explicitly removing all intermediate files ourselves unless there is an exception: assert trackingDir.getCreatedFiles().size() == 1 && trackingDir.getCreatedFiles().contains(result); - sortInfo.totalTime = System.currentTimeMillis() - sortInfo.totalTime; + sortInfo.totalTimeMS = System.currentTimeMillis() - startMS; CodecUtil.checkFooter(is.in); @@ -300,6 +334,8 @@ public String sort(String inputFileName) throws IOException { return result; + } catch (InterruptedException ie) { + throw new ThreadInterruptedException(ie); } finally { if (success == false) { IOUtils.deleteFilesIgnoringExceptions(trackingDir, trackingDir.getCreatedFiles()); @@ -307,32 +343,6 @@ public String sort(String inputFileName) throws IOException { } } - /** Sort a single partition in-memory. */ - protected String sortPartition(TrackingDirectoryWrapper trackingDir) throws IOException { - - try (IndexOutput tempFile = trackingDir.createTempOutput(tempFileNamePrefix, "sort", IOContext.DEFAULT); - ByteSequencesWriter out = getWriter(tempFile);) { - - BytesRef spare; - - long start = System.currentTimeMillis(); - BytesRefIterator iter = buffer.iterator(comparator); - sortInfo.sortTime += System.currentTimeMillis() - start; - - while ((spare = iter.next()) != null) { - assert spare.length <= Short.MAX_VALUE; - out.write(spare); - } - - // Clean up the buffer for the next partition. - buffer.clear(); - - CodecUtil.writeFooter(out.out); - - return tempFile.getName(); - } - } - /** Called on exception, to check whether the checksum is also corrupt in this source, and add that * information (checksum matched or didn't) as a suppressed exception. 
*/ private void verifyChecksum(Throwable priorException, ByteSequencesReader reader) throws IOException { @@ -342,122 +352,107 @@ private void verifyChecksum(Throwable priorException, ByteSequencesReader reader } /** Merge the most recent {@code maxTempFile} partitions into a new partition. */ - void mergePartitions(Directory trackingDir, List segments) throws IOException { + void mergePartitions(Directory trackingDir, List> segments) throws IOException { long start = System.currentTimeMillis(); - - List segmentsToMerge; + List> segmentsToMerge; if (segments.size() > maxTempFiles) { segmentsToMerge = segments.subList(segments.size() - maxTempFiles, segments.size()); } else { segmentsToMerge = segments; } - PriorityQueue queue = new PriorityQueue(segmentsToMerge.size()) { - @Override - protected boolean lessThan(FileAndTop a, FileAndTop b) { - return comparator.compare(a.current, b.current) < 0; - } - }; - - ByteSequencesReader[] streams = new ByteSequencesReader[segmentsToMerge.size()]; - - String newSegmentName = null; - - try (ByteSequencesWriter writer = getWriter(trackingDir.createTempOutput(tempFileNamePrefix, "sort", IOContext.DEFAULT))) { + sortInfo.mergeRounds++; - newSegmentName = writer.out.getName(); - - // Open streams and read the top for each file - for (int i = 0; i < segmentsToMerge.size(); i++) { - streams[i] = getReader(dir.openChecksumInput(segmentsToMerge.get(i), IOContext.READONCE), segmentsToMerge.get(i)); - BytesRef item = null; - try { - item = streams[i].next(); - } catch (Throwable t) { - verifyChecksum(t, streams[i]); - } - assert item != null; - queue.insertWithOverflow(new FileAndTop(i, item)); - } - - // Unix utility sort() uses ordered array of files to pick the next line from, updating - // it as it reads new lines. The PQ used here is a more elegant solution and has - // a nicer theoretical complexity bound :) The entire sorting process is I/O bound anyway - // so it shouldn't make much of a difference (didn't check). - FileAndTop top; - while ((top = queue.top()) != null) { - writer.write(top.current); - try { - top.current = streams[top.fd].next(); - } catch (Throwable t) { - verifyChecksum(t, streams[top.fd]); - } + MergePartitionsTask task = new MergePartitionsTask(trackingDir, new ArrayList<>(segmentsToMerge)); - if (top.current != null) { - queue.updateTop(); - } else { - queue.pop(); - } - } + segmentsToMerge.clear(); + segments.add(exec.submit(task)); - CodecUtil.writeFooter(writer.out); + sortInfo.tempMergeFiles++; + } - for(ByteSequencesReader reader : streams) { - CodecUtil.checkFooter(reader.in); - } - - sortInfo.mergeTime += System.currentTimeMillis() - start; - sortInfo.mergeRounds++; - } finally { - IOUtils.close(streams); + /** Holds one partition of items, either loaded into memory or based on a file. */ + private static class Partition { + public final SortableBytesRefArray buffer; + public final boolean exhausted; + public final long count; + public final String fileName; + + /** A partition loaded into memory. */ + public Partition(SortableBytesRefArray buffer, boolean exhausted) { + this.buffer = buffer; + this.fileName = null; + this.count = buffer.size(); + this.exhausted = exhausted; } - IOUtils.deleteFiles(trackingDir, segmentsToMerge); - - segmentsToMerge.clear(); - segments.add(newSegmentName); - - sortInfo.tempMergeFiles++; + /** An on-disk partition. 
*/ + public Partition(String fileName, long count) { + this.buffer = null; + this.fileName = fileName; + this.count = count; + this.exhausted = true; + } } - /** Read in a single partition of data */ - int readPartition(ByteSequencesReader reader) throws IOException { - long start = System.currentTimeMillis(); - if (valueLength != -1) { - int limit = ramBufferSize.bytes / valueLength; - for(int i=0;i ramBufferSize.bytes) { + break; + } } - buffer.append(item); } - } else { - while (true) { - BytesRef item = null; - try { - item = reader.next(); - } catch (Throwable t) { - verifyChecksum(t, reader); - } - if (item == null) { - break; - } - buffer.append(item); - // Account for the created objects. - // (buffer slots do not account to buffer size.) - if (bufferBytesUsed.get() > ramBufferSize.bytes) { - break; - } + sortInfo.readTimeMS += System.currentTimeMillis() - start; + success = true; + return new Partition(buffer, exhausted); + } finally { + if (success == false && partitionsInRAM != null) { + partitionsInRAM.release(); } } - sortInfo.readTime += System.currentTimeMillis() - start; - return buffer.size(); } static class FileAndTop { @@ -471,7 +466,7 @@ static class FileAndTop { } /** Subclasses can override to change how byte sequences are written to disk. */ - protected ByteSequencesWriter getWriter(IndexOutput out) throws IOException { + protected ByteSequencesWriter getWriter(IndexOutput out, long itemCount) throws IOException { return new ByteSequencesWriter(out); } @@ -587,5 +582,148 @@ public void close() throws IOException { /** Returns the comparator in use to sort entries */ public Comparator getComparator() { return comparator; - } + } + + /** Sorts one in-memory partition, writes it to disk, and returns the resulting file-based partition. */ + private class SortPartitionTask implements Callable { + + private final Directory dir; + private final Partition part; + + public SortPartitionTask(Directory dir, Partition part) { + this.dir = dir; + this.part = part; + } + + @Override + public Partition call() throws IOException { + try (IndexOutput tempFile = dir.createTempOutput(tempFileNamePrefix, "sort", IOContext.DEFAULT); + ByteSequencesWriter out = getWriter(tempFile, part.buffer.size());) { + + BytesRef spare; + + long startMS = System.currentTimeMillis(); + BytesRefIterator iter = part.buffer.iterator(comparator); + sortInfo.sortTimeMS.addAndGet(System.currentTimeMillis() - startMS); + + int count = 0; + while ((spare = iter.next()) != null) { + assert spare.length <= Short.MAX_VALUE; + out.write(spare); + count++; + } + + assert count == part.count; + + CodecUtil.writeFooter(out.out); + part.buffer.clear(); + + return new Partition(tempFile.getName(), part.count); + } finally { + if (partitionsInRAM != null) { + partitionsInRAM.release(); + } + } + } + } + + private Partition getPartition(Future future) throws IOException { + try { + return future.get(); + } catch (InterruptedException ie) { + throw new ThreadInterruptedException(ie); + } catch (ExecutionException ee) { + IOUtils.reThrow(ee.getCause()); + // oh so soon to go away: + return null; + } + } + + /** Merges multiple file-based partitions to a single on-disk partition. 
*/ + private class MergePartitionsTask implements Callable { + private final Directory dir; + private final List> segmentsToMerge; + + public MergePartitionsTask(Directory dir, List> segmentsToMerge) { + this.dir = dir; + this.segmentsToMerge = segmentsToMerge; + } + + @Override + public Partition call() throws IOException { + long totalCount = 0; + for (Future segment : segmentsToMerge) { + totalCount += getPartition(segment).count; + } + + PriorityQueue queue = new PriorityQueue(segmentsToMerge.size()) { + @Override + protected boolean lessThan(FileAndTop a, FileAndTop b) { + return comparator.compare(a.current, b.current) < 0; + } + }; + + ByteSequencesReader[] streams = new ByteSequencesReader[segmentsToMerge.size()]; + + String newSegmentName = null; + + long startMS = System.currentTimeMillis(); + try (ByteSequencesWriter writer = getWriter(dir.createTempOutput(tempFileNamePrefix, "sort", IOContext.DEFAULT), totalCount)) { + + newSegmentName = writer.out.getName(); + + // Open streams and read the top for each file + for (int i = 0; i < segmentsToMerge.size(); i++) { + Partition segment = getPartition(segmentsToMerge.get(i)); + streams[i] = getReader(dir.openChecksumInput(segment.fileName, IOContext.READONCE), segment.fileName); + + BytesRef item = null; + try { + item = streams[i].next(); + } catch (Throwable t) { + verifyChecksum(t, streams[i]); + } + assert item != null; + queue.insertWithOverflow(new FileAndTop(i, item)); + } + + // Unix utility sort() uses ordered array of files to pick the next line from, updating + // it as it reads new lines. The PQ used here is a more elegant solution and has + // a nicer theoretical complexity bound :) The entire sorting process is I/O bound anyway + // so it shouldn't make much of a difference (didn't check). + FileAndTop top; + while ((top = queue.top()) != null) { + writer.write(top.current); + try { + top.current = streams[top.fd].next(); + } catch (Throwable t) { + verifyChecksum(t, streams[top.fd]); + } + + if (top.current != null) { + queue.updateTop(); + } else { + queue.pop(); + } + } + + CodecUtil.writeFooter(writer.out); + + for(ByteSequencesReader reader : streams) { + CodecUtil.checkFooter(reader.in); + } + + sortInfo.mergeTimeMS.addAndGet(System.currentTimeMillis() - startMS); + } finally { + IOUtils.close(streams); + } + List toDelete = new ArrayList<>(); + for (Future segment : segmentsToMerge) { + toDelete.add(getPartition(segment).fileName); + } + IOUtils.deleteFiles(dir, toDelete); + + return new Partition(newSegmentName, totalCount); + } + } } diff --git a/lucene/core/src/java/org/apache/lucene/util/SameThreadExecutorService.java b/lucene/core/src/java/org/apache/lucene/util/SameThreadExecutorService.java new file mode 100644 index 000000000000..169b9f83bb31 --- /dev/null +++ b/lucene/core/src/java/org/apache/lucene/util/SameThreadExecutorService.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.lucene.util; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.AbstractExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; + +/** An {@code ExecutorService} that executes tasks immediately in the calling thread during submit. */ +class SameThreadExecutorService extends AbstractExecutorService { + private volatile boolean shutdown; + + @Override + public void execute(Runnable command) { + checkShutdown(); + command.run(); + } + + @Override + public List shutdownNow() { + shutdown(); + return Collections.emptyList(); + } + + @Override + public void shutdown() { + this.shutdown = true; + } + + @Override + public boolean isTerminated() { + // Simplified: we don't check for any threads hanging in execute (we could + // introduce an atomic counter, but there seems to be no point). + return shutdown == true; + } + + @Override + public boolean isShutdown() { + return shutdown == true; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + // See comment in isTerminated(); + return true; + } + + private void checkShutdown() { + if (shutdown) { + throw new RejectedExecutionException("Executor is shut down."); + } + } +} diff --git a/lucene/core/src/java/org/apache/lucene/util/Version.java b/lucene/core/src/java/org/apache/lucene/util/Version.java index da6d653bce71..f6e6adcc5ed1 100644 --- a/lucene/core/src/java/org/apache/lucene/util/Version.java +++ b/lucene/core/src/java/org/apache/lucene/util/Version.java @@ -101,6 +101,13 @@ public final class Version { @Deprecated public static final Version LUCENE_6_5_0 = new Version(6, 5, 0); + /** + * Match settings and bugs in Lucene's 6.5.1 release. + * @deprecated Use latest + */ + @Deprecated + public static final Version LUCENE_6_5_1 = new Version(6, 5, 1); + /** * Match settings and bugs in Lucene's 6.6.0 release. * @deprecated Use latest diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java index eeb40fa41159..1575a5b28be3 100644 --- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java +++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java @@ -884,11 +884,11 @@ protected int byteAt(BytesRef ref, int i) { }; } - OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix + "_bkd" + dim, cmp, offlineSorterBufferMB, offlineSorterMaxTempFiles, bytesPerDoc) { + OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix + "_bkd" + dim, cmp, offlineSorterBufferMB, offlineSorterMaxTempFiles, bytesPerDoc, null, 0) { /** We write/read fixed-byte-width file that {@link OfflinePointReader} can read. 
*/ @Override - protected ByteSequencesWriter getWriter(IndexOutput out) { + protected ByteSequencesWriter getWriter(IndexOutput out, long count) { return new ByteSequencesWriter(out) { @Override public void write(byte[] bytes, int off, int len) throws IOException { diff --git a/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java index 2cc9274ad420..6abbc2bfc5bd 100644 --- a/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java +++ b/lucene/core/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java @@ -393,4 +393,27 @@ public void testNormalize() { Analyzer a = new StandardAnalyzer(); assertEquals(new BytesRef("\"\\à3[]()! cz@"), a.normalize("dummy", "\"\\À3[]()! Cz@")); } + + public void testMaxTokenLengthDefault() throws Exception { + StandardAnalyzer a = new StandardAnalyzer(); + + StringBuilder bToken = new StringBuilder(); + // exact max length: + for(int i=0;i {}); + r.leaves().get(0).reader().getReaderCacheHelper().addClosedListener(key -> {}); + r.leaves().get(0).reader().getCoreCacheHelper().addClosedListener(key -> {}); + + // But now we close + r.close(); + expectThrows(AlreadyClosedException.class, () -> r.getReaderCacheHelper().addClosedListener(key -> {})); + expectThrows(AlreadyClosedException.class, () -> r.leaves().get(0).reader().getReaderCacheHelper().addClosedListener(key -> {})); + expectThrows(AlreadyClosedException.class, () -> r.leaves().get(0).reader().getCoreCacheHelper().addClosedListener(key -> {})); + + dir.close(); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java index 4ef580bf0717..be3a2af18b74 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java @@ -2403,4 +2403,86 @@ public void testTieBreak() throws Exception { } IOUtils.close(r, w, dir); } + + public void testIndexSortWithSparseField() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); + SortField sortField = new SortField("dense_int", SortField.Type.INT, true); + Sort indexSort = new Sort(sortField); + iwc.setIndexSort(indexSort); + IndexWriter w = new IndexWriter(dir, iwc); + Field textField = newTextField("sparse_text", "", Field.Store.NO); + for (int i = 0; i < 128; i++) { + Document doc = new Document(); + doc.add(new NumericDocValuesField("dense_int", i)); + if (i < 64) { + doc.add(new NumericDocValuesField("sparse_int", i)); + doc.add(new BinaryDocValuesField("sparse_binary", new BytesRef(Integer.toString(i)))); + textField.setStringValue("foo"); + doc.add(textField); + } + w.addDocument(doc); + } + w.commit(); + w.forceMerge(1); + DirectoryReader r = DirectoryReader.open(w); + assertEquals(1, r.leaves().size()); + LeafReader leafReader = r.leaves().get(0).reader(); + + NumericDocValues denseValues = leafReader.getNumericDocValues("dense_int"); + NumericDocValues sparseValues = leafReader.getNumericDocValues("sparse_int"); + BinaryDocValues sparseBinaryValues = leafReader.getBinaryDocValues("sparse_binary"); + NumericDocValues normsValues = leafReader.getNormValues("sparse_text"); + for(int docID = 0; docID < 128; docID++) { + assertTrue(denseValues.advanceExact(docID)); + assertEquals(127-docID, (int) denseValues.longValue()); + if (docID >= 64) { + 
assertTrue(denseValues.advanceExact(docID)); + assertTrue(sparseValues.advanceExact(docID)); + assertTrue(sparseBinaryValues.advanceExact(docID)); + assertTrue(normsValues.advanceExact(docID)); + assertEquals(124, normsValues.longValue()); + assertEquals(127-docID, (int) sparseValues.longValue()); + assertEquals(new BytesRef(Integer.toString(127-docID)), sparseBinaryValues.binaryValue()); + } else { + assertFalse(sparseBinaryValues.advanceExact(docID)); + assertFalse(sparseValues.advanceExact(docID)); + assertFalse(normsValues.advanceExact(docID)); + } + } + IOUtils.close(r, w, dir); + } + + public void testIndexSortOnSparseField() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); + SortField sortField = new SortField("sparse", SortField.Type.INT, false); + sortField.setMissingValue(Integer.MIN_VALUE); + Sort indexSort = new Sort(sortField); + iwc.setIndexSort(indexSort); + IndexWriter w = new IndexWriter(dir, iwc); + for (int i = 0; i < 128; i++) { + Document doc = new Document(); + if (i < 64) { + doc.add(new NumericDocValuesField("sparse", i)); + } + w.addDocument(doc); + } + w.commit(); + w.forceMerge(1); + DirectoryReader r = DirectoryReader.open(w); + assertEquals(1, r.leaves().size()); + LeafReader leafReader = r.leaves().get(0).reader(); + NumericDocValues sparseValues = leafReader.getNumericDocValues("sparse"); + for(int docID = 0; docID < 128; docID++) { + if (docID >= 64) { + assertTrue(sparseValues.advanceExact(docID)); + assertEquals(docID-64, (int) sparseValues.longValue()); + } else { + assertFalse(sparseValues.advanceExact(docID)); + } + } + IOUtils.close(r, w, dir); + } + } diff --git a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java index 5e4a5934d345..23c1bcdd6fd0 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java @@ -14,16 +14,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.lucene.store; +package org.apache.lucene.store; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.List; +import java.util.Set; +import org.apache.lucene.mockfile.ExtrasFS; import org.apache.lucene.util.LuceneTestCase; public class TestDirectory extends LuceneTestCase { @@ -137,5 +140,17 @@ public void testNotDirectory() throws Throwable { fsDir.close(); } } + + public void testListAll() throws Throwable { + Path dir = createTempDir("testdir"); + assumeFalse("this test does not expect extra files", dir.getFileSystem().provider() instanceof ExtrasFS); + Path file1 = Files.createFile(dir.resolve("tempfile1")); + Path file2 = Files.createFile(dir.resolve("tempfile2")); + Set files = new HashSet<>(Arrays.asList(FSDirectory.listAll(dir))); + + assertTrue(files.size() == 2); + assertTrue(files.contains(file1.getFileName().toString())); + assertTrue(files.contains(file2.getFileName().toString())); + } } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java b/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java index df73687d17c8..475f716acf5e 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; public class TestByteBlockPool extends LuceneTestCase { @@ -34,8 +35,7 @@ public void testReadAndWrite() throws IOException { final int numValues = atLeast(100); BytesRefBuilder ref = new BytesRefBuilder(); for (int i = 0; i < numValues; i++) { - final String value = TestUtil.randomRealisticUnicodeString(random(), - maxLength); + final String value = TestUtil.randomRealisticUnicodeString(random(), maxLength); list.add(new BytesRef(value)); ref.copyChars(value); pool.append(ref.get()); @@ -76,5 +76,33 @@ public void testReadAndWrite() throws IOException { pool.nextBuffer(); // prepare for next iter } } - } + } + + public void testLargeRandomBlocks() throws IOException { + Counter bytesUsed = Counter.newCounter(); + ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed)); + pool.nextBuffer(); + + List items = new ArrayList<>(); + for (int i=0;i<100;i++) { + int size; + if (random().nextBoolean()) { + size = TestUtil.nextInt(random(), 100, 1000); + } else { + size = TestUtil.nextInt(random(), 50000, 100000); + } + byte[] bytes = new byte[size]; + random().nextBytes(bytes); + items.add(bytes); + pool.append(new BytesRef(bytes)); + } + + long position = 0; + for (byte[] expected : items) { + byte[] actual = new byte[expected.length]; + pool.readBytes(position, actual, 0, actual.length); + assertTrue(Arrays.equals(expected, actual)); + position += expected.length; + } + } } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java index 49ed11013d9c..68ac0a285c40 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java @@ -24,10 +24,15 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.CorruptingIndexOutput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; @@ -72,11 +77,25 @@ public void testSingleLine() throws Exception { } } + private ExecutorService randomExecutorServiceOrNull() { + if (random().nextBoolean()) { + return null; + } else { + return new ThreadPoolExecutor(1, TestUtil.nextInt(random(), 2, 6), Long.MAX_VALUE, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), + new NamedThreadFactory("TestIndexSearcher")); + } + } + public void testIntermediateMerges() throws Exception { // Sort 20 mb worth of data with 1mb buffer, binary merging. try (Directory dir = newDirectory()) { - SortInfo info = checkSort(dir, new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), 2, -1), + ExecutorService exec = randomExecutorServiceOrNull(); + SortInfo info = checkSort(dir, new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), 2, -1, exec, TestUtil.nextInt(random(), 1, 4)), generateRandom((int)OfflineSorter.MB * 20)); + if (exec != null) { + exec.shutdownNow(); + } assertTrue(info.mergeRounds > 10); } } @@ -84,8 +103,12 @@ public void testIntermediateMerges() throws Exception { public void testSmallRandom() throws Exception { // Sort 20 mb worth of data with 1mb buffer. try (Directory dir = newDirectory()) { - SortInfo sortInfo = checkSort(dir, new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), OfflineSorter.MAX_TEMPFILES, -1), + ExecutorService exec = randomExecutorServiceOrNull(); + SortInfo sortInfo = checkSort(dir, new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), OfflineSorter.MAX_TEMPFILES, -1, exec, TestUtil.nextInt(random(), 1, 4)), generateRandom((int)OfflineSorter.MB * 20)); + if (exec != null) { + exec.shutdownNow(); + } assertEquals(3, sortInfo.mergeRounds); } } @@ -94,8 +117,12 @@ public void testSmallRandom() throws Exception { public void testLargerRandom() throws Exception { // Sort 100MB worth of data with 15mb buffer. 
try (Directory dir = newFSDirectory(createTempDir())) { - checkSort(dir, new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(16), OfflineSorter.MAX_TEMPFILES, -1), + ExecutorService exec = randomExecutorServiceOrNull(); + checkSort(dir, new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(16), OfflineSorter.MAX_TEMPFILES, -1, exec, TestUtil.nextInt(random(), 1, 4)), generateRandom((int)OfflineSorter.MB * 100)); + if (exec != null) { + exec.shutdownNow(); + } } } @@ -357,7 +384,7 @@ public IndexOutput createTempOutput(String prefix, String suffix, IOContext cont writeAll(unsorted, generateFixed((int) (OfflineSorter.MB * 3))); CorruptIndexException e = expectThrows(CorruptIndexException.class, () -> { - new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), 10, -1).sort(unsorted.getName()); + new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), 10, -1, null, 0).sort(unsorted.getName()); }); assertTrue(e.getMessage().contains("checksum failed (hardware problem?)")); } @@ -407,7 +434,7 @@ protected void corruptFile() throws IOException { writeAll(unsorted, generateFixed((int) (OfflineSorter.MB * 3))); EOFException e = expectThrows(EOFException.class, () -> { - new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), 10, -1).sort(unsorted.getName()); + new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(1), 10, -1, null, 0).sort(unsorted.getName()); }); assertEquals(1, e.getSuppressed().length); assertTrue(e.getSuppressed()[0] instanceof CorruptIndexException); @@ -429,8 +456,12 @@ public void testFixedLengthHeap() throws Exception { CodecUtil.writeFooter(out); } - OfflineSorter sorter = new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(4), OfflineSorter.MAX_TEMPFILES, Integer.BYTES); + ExecutorService exec = randomExecutorServiceOrNull(); + OfflineSorter sorter = new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(4), OfflineSorter.MAX_TEMPFILES, Integer.BYTES, exec, TestUtil.nextInt(random(), 1, 4)); sorter.sort(out.getName()); + if (exec != null) { + exec.shutdownNow(); + } // 1 MB of ints with 4 MH heap allowed should have been sorted in a single heap partition: assertEquals(0, sorter.sortInfo.mergeRounds); dir.close(); @@ -447,7 +478,7 @@ public void testFixedLengthLiesLiesLies() throws Exception { CodecUtil.writeFooter(out); } - OfflineSorter sorter = new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(4), OfflineSorter.MAX_TEMPFILES, Long.BYTES); + OfflineSorter sorter = new OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(4), OfflineSorter.MAX_TEMPFILES, Long.BYTES, null, 0); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { sorter.sort(out.getName()); }); @@ -455,18 +486,59 @@ public void testFixedLengthLiesLiesLies() throws Exception { dir.close(); } + // OfflineSorter should not call my BytesSequencesReader.next() again after it already returned null: + public void testOverNexting() throws Exception { + Directory dir = newDirectory(); + IndexOutput out = dir.createTempOutput("unsorted", "tmp", IOContext.DEFAULT); + try (ByteSequencesWriter w = new OfflineSorter.ByteSequencesWriter(out)) { + byte[] bytes = new byte[Integer.BYTES]; + random().nextBytes(bytes); + w.write(bytes); + CodecUtil.writeFooter(out); + } + + new 
OfflineSorter(dir, "foo", OfflineSorter.DEFAULT_COMPARATOR, BufferSize.megabytes(4), OfflineSorter.MAX_TEMPFILES, Integer.BYTES, null, 0) { + @Override + protected ByteSequencesReader getReader(ChecksumIndexInput in, String name) throws IOException { + ByteSequencesReader other = super.getReader(in, name); + + return new ByteSequencesReader(in, name) { + + private boolean alreadyEnded; + + @Override + public BytesRef next() throws IOException { + // if we returned null already, OfflineSorter should not call next() again + assertFalse(alreadyEnded); + BytesRef result = other.next(); + if (result == null) { + alreadyEnded = true; + } + return result; + } + + @Override + public void close() throws IOException { + other.close(); + } + }; + } + }.sort(out.getName()); + dir.close(); + } + public void testInvalidFixedLength() throws Exception { IllegalArgumentException e; e = expectThrows(IllegalArgumentException.class, () -> { new OfflineSorter(null, "foo", OfflineSorter.DEFAULT_COMPARATOR, - BufferSize.megabytes(1), OfflineSorter.MAX_TEMPFILES, 0); + BufferSize.megabytes(1), OfflineSorter.MAX_TEMPFILES, 0, null, 0); }); assertEquals("valueLength must be 1 .. 32767; got: 0", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { new OfflineSorter(null, "foo", OfflineSorter.DEFAULT_COMPARATOR, - BufferSize.megabytes(1), OfflineSorter.MAX_TEMPFILES, Integer.MAX_VALUE); + BufferSize.megabytes(1), OfflineSorter.MAX_TEMPFILES, Integer.MAX_VALUE, null, 0); }); assertEquals("valueLength must be 1 .. 32767; got: 2147483647", e.getMessage()); } diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java index 96db60f4e421..ea67294be52d 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java @@ -105,7 +105,7 @@ public FacetsConfig() { * * @return The default configuration to be used for dimensions that * are not yet set in the {@link FacetsConfig} */ - protected DimConfig getDefaultDimConfig(){ + protected DimConfig getDefaultDimConfig() { return DEFAULT_DIM_CONFIG; } diff --git a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java index 6bcfa465b474..cff1cca06d8d 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -33,6 +34,8 @@ import org.apache.lucene.index.MultiDocValues.OrdinalMap; import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; /** @@ -104,11 +107,46 @@ public DefaultSortedSetDocValuesReaderState(IndexReader reader, String field) th } } + /** + * Return the memory usage of this object in bytes. Negative values are illegal. + */ + @Override + public long ramBytesUsed() { + synchronized (cachedOrdMaps) { + long bytes = 0; + for (OrdinalMap map : cachedOrdMaps.values()) { + bytes += map.ramBytesUsed(); + } + + return bytes; + } + } + + /** + * Returns nested resources of this class. 
+ * The result should be a point-in-time snapshot (to avoid race conditions). + * @see Accountables + */ + @Override + public Collection getChildResources() { + synchronized (cachedOrdMaps) { + return Accountables.namedAccountables("DefaultSortedSetDocValuesReaderState", cachedOrdMaps); + } + } + + @Override + public String toString() { + return "DefaultSortedSetDocValuesReaderState(field=" + field + " origReader=" + origReader + ")"; + } + /** Return top-level doc values. */ @Override public SortedSetDocValues getDocValues() throws IOException { // TODO: this is dup'd from slow composite reader wrapper ... can we factor it out to share? OrdinalMap map = null; + // TODO: why are we lazy about this? It's better if ctor pays the cost, not first query? Oh, but we + // call this method from ctor, ok. Also, we only ever store one entry in the map (for key=field) so + // why are we using a map? synchronized (cachedOrdMaps) { map = cachedOrdMaps.get(field); if (map == null) { diff --git a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesReaderState.java b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesReaderState.java index 83ed3f04f25a..546b3198ed25 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesReaderState.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesReaderState.java @@ -21,6 +21,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.util.Accountable; /** Wraps a {@link IndexReader} and resolves ords * using existing {@link SortedSetDocValues} APIs without a @@ -38,7 +39,7 @@ * so you should create it once and re-use that one instance * for a given {@link IndexReader}. 
*/ -public abstract class SortedSetDocValuesReaderState { +public abstract class SortedSetDocValuesReaderState implements Accountable { /** Holds start/end range of ords, which maps to one * dimension (someday we may generalize it to map to diff --git a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java index 5aed22b4e841..3ad6e683d4fa 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -39,6 +40,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Accountable; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.TestUtil; @@ -178,6 +180,17 @@ public void testSparseFacets() throws Exception { assertEquals("dim=b path=[] value=2 childCount=2\n bar1 (1)\n bar2 (1)\n", results.get(1).toString()); assertEquals("dim=c path=[] value=1 childCount=1\n baz1 (1)\n", results.get(2).toString()); + Collection resources = state.getChildResources(); + assertTrue(state.toString().contains(FacetsConfig.DEFAULT_INDEX_FIELD_NAME)); + if (searcher.getIndexReader().leaves().size() > 1) { + assertTrue(state.ramBytesUsed() > 0); + assertFalse(resources.isEmpty()); + assertTrue(resources.toString().contains(FacetsConfig.DEFAULT_INDEX_FIELD_NAME)); + } else { + assertEquals(0, state.ramBytesUsed()); + assertTrue(resources.isEmpty()); + } + searcher.getIndexReader().close(); dir.close(); } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java index b5fbdc36f1a8..503b9529bd65 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java @@ -18,27 +18,62 @@ import java.io.IOException; import java.util.Collection; +import java.util.HashMap; +import java.util.Map; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SimpleCollector; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.util.FixedBitSet; /** - * This collector specializes in collecting the most relevant document (group head) for each group that match the query. + * This collector specializes in collecting the most relevant document (group head) for each + * group that matches the query. 
+ * + * Clients should create new collectors by calling {@link #newCollector(GroupSelector, Sort)} * * @lucene.experimental */ @SuppressWarnings({"unchecked","rawtypes"}) public abstract class AllGroupHeadsCollector extends SimpleCollector { + private final GroupSelector groupSelector; + protected final Sort sort; + protected final int[] reversed; protected final int compIDXEnd; - protected final TemporalResult temporalResult; - protected AllGroupHeadsCollector(int numberOfSorts) { - this.reversed = new int[numberOfSorts]; - this.compIDXEnd = numberOfSorts - 1; - temporalResult = new TemporalResult(); + protected Map> heads = new HashMap<>(); + + protected LeafReaderContext context; + protected Scorer scorer; + + /** + * Create a new AllGroupHeadsCollector based on the type of within-group Sort required + * @param selector a GroupSelector to define the groups + * @param sort the within-group sort to use to choose the group head document + * @param the group value type + */ + public static AllGroupHeadsCollector newCollector(GroupSelector selector, Sort sort) { + if (sort.equals(Sort.RELEVANCE)) + return new ScoringGroupHeadsCollector<>(selector, sort); + return new SortingGroupHeadsCollector<>(selector, sort); + } + + private AllGroupHeadsCollector(GroupSelector selector, Sort sort) { + this.groupSelector = selector; + this.sort = sort; + this.reversed = new int[sort.getSort().length]; + final SortField[] sortFields = sort.getSort(); + for (int i = 0; i < sortFields.length; i++) { + reversed[i] = sortFields[i].getReverse() ? -1 : 1; + } + this.compIDXEnd = this.reversed.length - 1; } /** @@ -78,35 +113,28 @@ public int groupHeadsSize() { return getCollectedGroupHeads().size(); } - /** - * Returns the group head and puts it into {@link #temporalResult}. - * If the group head wasn't encountered before then it will be added to the collected group heads. - *
- * The {@link TemporalResult#stop} property will be true if the group head wasn't encountered before - * otherwise false. - * - * @param doc The document to retrieve the group head for. - * @throws IOException If I/O related errors occur - */ - protected abstract void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException; - /** * Returns the collected group heads. * Subsequent calls should return the same group heads. * * @return the collected group heads */ - protected abstract Collection> getCollectedGroupHeads(); + protected Collection> getCollectedGroupHeads() { + return heads.values(); + } @Override public void collect(int doc) throws IOException { - retrieveGroupHeadAndAddIfNotExist(doc); - if (temporalResult.stop) { + groupSelector.advanceTo(doc); + T groupValue = groupSelector.currentValue(); + if (heads.containsKey(groupValue) == false) { + groupValue = groupSelector.copyValue(); + heads.put(groupValue, newGroupHead(doc, groupValue, context, scorer)); return; } - GroupHead groupHead = temporalResult.groupHead; - // Ok now we need to check if the current doc is more relevant then current doc for this group + GroupHead groupHead = heads.get(groupValue); + // Ok now we need to check if the current doc is more relevant than top doc for this group for (int compIDX = 0; ; compIDX++) { final int c = reversed[compIDX] * groupHead.compare(compIDX, doc); if (c < 0) { @@ -125,17 +153,33 @@ public void collect(int doc) throws IOException { groupHead.updateDocHead(doc); } - /** - * Contains the result of group head retrieval. - * To prevent new object creations of this class for every collect. - */ - protected class TemporalResult { + @Override + public boolean needsScores() { + return sort.needsScores(); + } - public GroupHead groupHead; - public boolean stop; + @Override + protected void doSetNextReader(LeafReaderContext context) throws IOException { + groupSelector.setNextReader(context); + this.context = context; + for (GroupHead head : heads.values()) { + head.setNextReader(context); + } + } + @Override + public void setScorer(Scorer scorer) throws IOException { + this.scorer = scorer; + for (GroupHead head : heads.values()) { + head.setScorer(scorer); + } } + /** + * Create a new GroupHead for the given group value, initialized with a doc, context and scorer + */ + protected abstract GroupHead newGroupHead(int doc, T value, LeafReaderContext context, Scorer scorer) throws IOException; + /** * Represents a group head. A group head is the most relevant document for a particular group. * The relevancy is based is usually based on the sort. @@ -147,11 +191,29 @@ public static abstract class GroupHead { public final T groupValue; public int doc; - protected GroupHead(T groupValue, int doc) { + protected int docBase; + + /** + * Create a new GroupHead for the given value + */ + protected GroupHead(T groupValue, int doc, int docBase) { this.groupValue = groupValue; - this.doc = doc; + this.doc = doc + docBase; + this.docBase = docBase; + } + + /** + * Called for each segment + */ + protected void setNextReader(LeafReaderContext ctx) throws IOException { + this.docBase = ctx.docBase; } + /** + * Called for each segment + */ + protected abstract void setScorer(Scorer scorer) throws IOException; + /** * Compares the specified document for a specified comparator against the current most relevant document. 
* @@ -173,4 +235,117 @@ protected GroupHead(T groupValue, int doc) { } + /** + * General implementation using a {@link FieldComparator} to select the group head + */ + private static class SortingGroupHeadsCollector extends AllGroupHeadsCollector { + + protected SortingGroupHeadsCollector(GroupSelector selector, Sort sort) { + super(selector, sort); + } + + @Override + protected GroupHead newGroupHead(int doc, T value, LeafReaderContext ctx, Scorer scorer) throws IOException { + return new SortingGroupHead<>(sort, value, doc, ctx, scorer); + } + } + + private static class SortingGroupHead extends GroupHead { + + final FieldComparator[] comparators; + final LeafFieldComparator[] leafComparators; + + protected SortingGroupHead(Sort sort, T groupValue, int doc, LeafReaderContext context, Scorer scorer) throws IOException { + super(groupValue, doc, context.docBase); + final SortField[] sortFields = sort.getSort(); + comparators = new FieldComparator[sortFields.length]; + leafComparators = new LeafFieldComparator[sortFields.length]; + for (int i = 0; i < sortFields.length; i++) { + comparators[i] = sortFields[i].getComparator(1, i); + leafComparators[i] = comparators[i].getLeafComparator(context); + leafComparators[i].setScorer(scorer); + leafComparators[i].copy(0, doc); + leafComparators[i].setBottom(0); + } + } + + @Override + public void setNextReader(LeafReaderContext ctx) throws IOException { + super.setNextReader(ctx); + for (int i = 0; i < comparators.length; i++) { + leafComparators[i] = comparators[i].getLeafComparator(ctx); + } + } + + @Override + protected void setScorer(Scorer scorer) throws IOException { + for (LeafFieldComparator c : leafComparators) { + c.setScorer(scorer); + } + } + + @Override + public int compare(int compIDX, int doc) throws IOException { + return leafComparators[compIDX].compareBottom(doc); + } + + @Override + public void updateDocHead(int doc) throws IOException { + for (LeafFieldComparator comparator : leafComparators) { + comparator.copy(0, doc); + comparator.setBottom(0); + } + this.doc = doc + docBase; + } + } + + /** + * Specialized implementation for sorting by score + */ + private static class ScoringGroupHeadsCollector extends AllGroupHeadsCollector { + + protected ScoringGroupHeadsCollector(GroupSelector selector, Sort sort) { + super(selector, sort); + } + + @Override + protected GroupHead newGroupHead(int doc, T value, LeafReaderContext context, Scorer scorer) throws IOException { + return new ScoringGroupHead<>(scorer, value, doc, context.docBase); + } + } + + private static class ScoringGroupHead extends GroupHead { + + private Scorer scorer; + private float topScore; + + protected ScoringGroupHead(Scorer scorer, T groupValue, int doc, int docBase) throws IOException { + super(groupValue, doc, docBase); + assert scorer.docID() == doc; + this.scorer = scorer; + this.topScore = scorer.score(); + } + + @Override + protected void setScorer(Scorer scorer) { + this.scorer = scorer; + } + + @Override + protected int compare(int compIDX, int doc) throws IOException { + assert scorer.docID() == doc; + assert compIDX == 0; + float score = scorer.score(); + int c = Float.compare(score, topScore); + if (c > 0) + topScore = score; + return c; + } + + @Override + protected void updateDocHead(int doc) throws IOException { + this.doc = doc + docBase; + } + } + } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java index 
af697af5a843..8434534df4de 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java @@ -18,23 +18,34 @@ import java.io.IOException; import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SimpleCollector; -import org.apache.lucene.util.BytesRef; /** * A collector that collects all groups that match the * query. Only the group value is collected, and the order * is undefined. This collector does not determine * the most relevant document of a group. - *
- * This is an abstract version. Concrete implementations define - * what a group actually is and how it is internally collected. * * @lucene.experimental */ -public abstract class AllGroupsCollector extends SimpleCollector { +public class AllGroupsCollector extends SimpleCollector { + + private final GroupSelector groupSelector; + + private final Set groups = new HashSet(); + + /** + * Create a new AllGroupsCollector + * @param groupSelector the GroupSelector to determine groups + */ + public AllGroupsCollector(GroupSelector groupSelector) { + this.groupSelector = groupSelector; + } /** * Returns the total number of groups for the executed search. @@ -49,17 +60,30 @@ public int getGroupCount() { /** * Returns the group values *
- * This is an unordered collections of group values. For each group that matched the query there is a {@link BytesRef} - * representing a group value. + * This is an unordered collections of group values. * * @return the group values */ - public abstract Collection getGroups(); + public Collection getGroups() { + return groups; + } - // Empty not necessary @Override public void setScorer(Scorer scorer) throws IOException {} + @Override + protected void doSetNextReader(LeafReaderContext context) throws IOException { + groupSelector.setNextReader(context); + } + + @Override + public void collect(int doc) throws IOException { + groupSelector.advanceTo(doc); + if (groups.contains(groupSelector.currentValue())) + return; + groups.add(groupSelector.copyValue()); + } + @Override public boolean needsScores() { return false; // the result is unaffected by relevancy diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java index c965042a8553..a50fda103d95 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java @@ -50,7 +50,7 @@ * being that the documents in each group must always be * indexed as a block. This collector also fills in * TopGroups.totalGroupCount without requiring the separate - * {@link org.apache.lucene.search.grouping.term.TermAllGroupsCollector}. However, this collector does + * {@link org.apache.lucene.search.grouping.AllGroupsCollector}. However, this collector does * not fill in the groupValue of each group; this field * will always be null. * diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java index 54d752c58766..103b0d26a707 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java @@ -16,10 +16,14 @@ */ package org.apache.lucene.search.grouping; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.SimpleCollector; /** @@ -27,33 +31,99 @@ * * @lucene.experimental */ -public abstract class DistinctValuesCollector extends SimpleCollector { +public class DistinctValuesCollector extends SecondPassGroupingCollector { + + /** + * Create a DistinctValuesCollector + * @param groupSelector the group selector to determine the top-level groups + * @param groups the top-level groups to collect for + * @param valueSelector a group selector to determine which values to collect per-group + */ + public DistinctValuesCollector(GroupSelector groupSelector, Collection> groups, + GroupSelector valueSelector) { + super(groupSelector, groups, new DistinctValuesReducer<>(valueSelector)); + } + + private static class ValuesCollector extends SimpleCollector { + + final GroupSelector valueSelector; + final Set values = new HashSet<>(); + + private ValuesCollector(GroupSelector valueSelector) { + this.valueSelector = valueSelector; + } + + @Override + public void collect(int doc) throws IOException { + if (valueSelector.advanceTo(doc) == GroupSelector.State.ACCEPT) { + R value = 
valueSelector.currentValue(); + if (values.contains(value) == false) + values.add(valueSelector.copyValue()); + } + else { + if (values.contains(null) == false) + values.add(null); + } + } + + @Override + protected void doSetNextReader(LeafReaderContext context) throws IOException { + valueSelector.setNextReader(context); + } + + @Override + public boolean needsScores() { + return false; + } + } + + private static class DistinctValuesReducer extends GroupReducer> { + + final GroupSelector valueSelector; + + private DistinctValuesReducer(GroupSelector valueSelector) { + this.valueSelector = valueSelector; + } + + @Override + public boolean needsScores() { + return false; + } + + @Override + protected ValuesCollector newCollector() { + return new ValuesCollector<>(valueSelector); + } + } /** * Returns all unique values for each top N group. * * @return all unique values for each top N group */ - public abstract List> getGroups(); + public List> getGroups() { + List> counts = new ArrayList<>(); + for (SearchGroup group : groups) { + @SuppressWarnings("unchecked") + ValuesCollector vc = (ValuesCollector) groupReducer.getCollector(group.groupValue); + counts.add(new GroupCount<>(group.groupValue, vc.values)); + } + return counts; + } /** * Returned by {@link DistinctValuesCollector#getGroups()}, * representing the value and set of distinct values for the group. */ - public static class GroupCount { + public static class GroupCount { public final T groupValue; - public final Set uniqueValues; + public final Set uniqueValues; - public GroupCount(T groupValue) { + public GroupCount(T groupValue, Set values) { this.groupValue = groupValue; - this.uniqueValues = new HashSet<>(); + this.uniqueValues = values; } } - @Override - public boolean needsScores() { - return false; // not needed to fetch all values - } - } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java index 02bb1a276379..bd47adbb1f7f 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java @@ -33,15 +33,16 @@ /** FirstPassGroupingCollector is the first of two passes necessary * to collect grouped hits. This pass gathers the top N sorted - * groups. Concrete subclasses define what a group is and how it - * is internally collected. + * groups. Groups are defined by a {@link GroupSelector} * *
See {@link org.apache.lucene.search.grouping} for more
 * details including a full code example.
* * @lucene.experimental */ -abstract public class FirstPassGroupingCollector extends SimpleCollector { +public class FirstPassGroupingCollector extends SimpleCollector { + + private final GroupSelector groupSelector; private final FieldComparator[] comparators; private final LeafFieldComparator[] leafComparators; @@ -60,16 +61,18 @@ abstract public class FirstPassGroupingCollector extends SimpleCollector { /** * Create the first pass collector. * - * @param groupSort The {@link Sort} used to sort the + * @param groupSelector a GroupSelector used to defined groups + * @param groupSort The {@link Sort} used to sort the * groups. The top sorted document within each group * according to groupSort, determines how that group * sorts against other groups. This must be non-null, * ie, if you want to groupSort by relevance use * Sort.RELEVANCE. - * @param topNGroups How many top groups to keep. + * @param topNGroups How many top groups to keep. */ @SuppressWarnings({"unchecked", "rawtypes"}) - public FirstPassGroupingCollector(Sort groupSort, int topNGroups) { + public FirstPassGroupingCollector(GroupSelector groupSelector, Sort groupSort, int topNGroups) { + this.groupSelector = groupSelector; if (topNGroups < 1) { throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")"); } @@ -133,7 +136,7 @@ public Collection> getTopGroups(int groupOffset, boolean fillFiel if (upto++ < groupOffset) { continue; } - //System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString())); + // System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.toString())); SearchGroup searchGroup = new SearchGroup<>(); searchGroup.groupValue = group.groupValue; if (fillFields) { @@ -155,14 +158,11 @@ public void setScorer(Scorer scorer) throws IOException { } } - @Override - public void collect(int doc) throws IOException { - //System.out.println("FP.collect doc=" + doc); - + private boolean isCompetitive(int doc) throws IOException { // If orderedGroups != null we already have collected N groups and // can short circuit by comparing this document to the bottom group, // without having to find what group this document belongs to. - + // Even if this document belongs to a group in the top N, we'll know that // we don't have to update that group. @@ -173,7 +173,7 @@ public void collect(int doc) throws IOException { final int c = reversed[compIDX] * leafComparators[compIDX].compareBottom(doc); if (c < 0) { // Definitely not competitive. So don't even bother to continue - return; + return false; } else if (c > 0) { // Definitely competitive. break; @@ -181,15 +181,24 @@ public void collect(int doc) throws IOException { // Here c=0. If we're at the last comparator, this doc is not // competitive, since docs are visited in doc Id order, which means // this doc cannot compete with any other document in the queue. - return; + return false; } } } + return true; + } + + @Override + public void collect(int doc) throws IOException { + + if (isCompetitive(doc) == false) + return; // TODO: should we add option to mean "ignore docs that // don't have the group field" (instead of stuffing them // under null group)? 
- final T groupValue = getDocGroupValue(doc); + groupSelector.advanceTo(doc); + T groupValue = groupSelector.currentValue(); final CollectedSearchGroup group = groupMap.get(groupValue); @@ -207,7 +216,7 @@ public void collect(int doc) throws IOException { // Add a new CollectedSearchGroup: CollectedSearchGroup sg = new CollectedSearchGroup<>(); - sg.groupValue = copyDocGroupValue(groupValue, null); + sg.groupValue = groupSelector.copyValue(); sg.comparatorSlot = groupMap.size(); sg.topDoc = docBase + doc; for (LeafFieldComparator fc : leafComparators) { @@ -233,7 +242,7 @@ public void collect(int doc) throws IOException { groupMap.remove(bottomGroup.groupValue); // reuse the removed CollectedSearchGroup - bottomGroup.groupValue = copyDocGroupValue(groupValue, bottomGroup.groupValue); + bottomGroup.groupValue = groupSelector.copyValue(); bottomGroup.topDoc = docBase + doc; for (LeafFieldComparator fc : leafComparators) { @@ -338,25 +347,15 @@ protected void doSetNextReader(LeafReaderContext readerContext) throws IOExcepti for (int i=0; i getGroupSelector() { + return groupSelector; + } } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupReducer.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupReducer.java new file mode 100644 index 000000000000..4366e9123f78 --- /dev/null +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupReducer.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.lucene.search.grouping; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorer; + +/** + * Concrete implementations of this class define what to collect for individual + * groups during the second-pass of a grouping search. + * + * Each group is assigned a Collector returned by {@link #newCollector()}, and + * {@link LeafCollector#collect(int)} is called for each document that is in + * a group + * + * @see SecondPassGroupingCollector + * + * @param the type of the value used for grouping + * @param the type of {@link Collector} used to reduce each group + */ +public abstract class GroupReducer { + + private final Map> groups = new HashMap<>(); + + /** + * Define which groups should be reduced. 
+ * + * Called by {@link SecondPassGroupingCollector} + */ + public void setGroups(Collection> groups) { + for (SearchGroup group : groups) { + this.groups.put(group.groupValue, new GroupCollector<>(newCollector())); + } + } + + /** + * Whether or not this reducer requires collected documents to be scored + */ + public abstract boolean needsScores(); + + /** + * Creates a new Collector for each group + */ + protected abstract C newCollector(); + + /** + * Get the Collector for a given group + */ + public final C getCollector(T value) { + return groups.get(value).collector; + } + + /** + * Collect a given document into a given group + * @throws IOException on error + */ + public final void collect(T value, int doc) throws IOException { + GroupCollector collector = groups.get(value); + collector.leafCollector.collect(doc); + } + + /** + * Set the Scorer on all group collectors + */ + public final void setScorer(Scorer scorer) throws IOException { + for (GroupCollector collector : groups.values()) { + collector.leafCollector.setScorer(scorer); + } + } + + /** + * Called when the parent {@link SecondPassGroupingCollector} moves to a new segment + */ + public final void setNextReader(LeafReaderContext ctx) throws IOException { + for (GroupCollector collector : groups.values()) { + collector.leafCollector = collector.collector.getLeafCollector(ctx); + } + } + + private static final class GroupCollector { + + final C collector; + LeafCollector leafCollector; + + private GroupCollector(C collector) { + this.collector = collector; + } + } + +} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupSelector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupSelector.java new file mode 100644 index 000000000000..dbb09329f8ae --- /dev/null +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupSelector.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.lucene.search.grouping; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.lucene.index.LeafReaderContext; + +/** + * Defines a group, for use by grouping collectors + * + * A GroupSelector acts as an iterator over documents. For each segment, clients + * should call {@link #setNextReader(LeafReaderContext)}, and then {@link #advanceTo(int)} + * for each matching document. 
+ * + * @param the type of the group value + */ +public abstract class GroupSelector { + + /** + * What to do with the current value + */ + public enum State { SKIP, ACCEPT } + + /** + * Set the LeafReaderContext + */ + public abstract void setNextReader(LeafReaderContext readerContext) throws IOException; + + /** + * Advance the GroupSelector's iterator to the given document + */ + public abstract State advanceTo(int doc) throws IOException; + + /** + * Get the group value of the current document + * + * N.B. this object may be reused, for a persistent version use {@link #copyValue()} + */ + public abstract T currentValue(); + + /** + * @return a copy of the group value of the current document + */ + public abstract T copyValue(); + + /** + * Set a restriction on the group values returned by this selector + * + * If the selector is positioned on a document whose group value is not contained + * within this set, then {@link #advanceTo(int)} will return {@link State#SKIP} + * + * @param groups a set of {@link SearchGroup} objects to limit selections to + */ + public abstract void setGroups(Collection> groups); + +} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/Grouper.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/Grouper.java deleted file mode 100644 index 2ff79a1331b2..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/Grouper.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.lucene.search.grouping; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.lucene.search.Sort; - -/** - * A factory object to create first and second-pass collectors, run by a {@link GroupingSearch} - * @param the type the group value - */ -public abstract class Grouper { - - /** - * Create a first-pass collector - * @param sort the order in which groups should be returned - * @param count how many groups to return - */ - public abstract FirstPassGroupingCollector getFirstPassCollector(Sort sort, int count) throws IOException; - - /** - * Create an {@link AllGroupsCollector} - */ - public abstract AllGroupsCollector getAllGroupsCollector(); - - /** - * Create an {@link AllGroupHeadsCollector} - * @param sort a within-group sort order to determine which doc is the group head - */ - public abstract AllGroupHeadsCollector getGroupHeadsCollector(Sort sort); - - /** - * Create a second-pass collector - */ - public abstract SecondPassGroupingCollector getSecondPassCollector( - Collection> groups, Sort groupSort, Sort withinGroupSort, - int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException; - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java index f4319d578d35..a36917d742ac 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java @@ -30,8 +30,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; -import org.apache.lucene.search.grouping.function.FunctionGrouper; -import org.apache.lucene.search.grouping.term.TermGrouper; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.mutable.MutableValue; @@ -43,7 +41,7 @@ */ public class GroupingSearch { - private final Grouper grouper; + private final GroupSelector grouper; private final Query groupEndDocs; private Sort groupSort = Sort.RELEVANCE; @@ -71,11 +69,7 @@ public class GroupingSearch { * @param groupField The name of the field to group by. 
*/ public GroupingSearch(String groupField) { - this(new TermGrouper(groupField, 128), null); - } - - public GroupingSearch(String groupField, int initialSize) { - this(new TermGrouper(groupField, initialSize), null); + this(new TermGroupSelector(groupField), null); } /** @@ -86,7 +80,7 @@ public GroupingSearch(String groupField, int initialSize) { * @param valueSourceContext The context of the specified groupFunction */ public GroupingSearch(ValueSource groupFunction, Map valueSourceContext) { - this(new FunctionGrouper(groupFunction, valueSourceContext), null); + this(new ValueSourceGroupSelector(groupFunction, valueSourceContext), null); } /** @@ -99,7 +93,7 @@ public GroupingSearch(Query groupEndDocs) { this(null, groupEndDocs); } - private GroupingSearch(Grouper grouper, Query groupEndDocs) { + private GroupingSearch(GroupSelector grouper, Query groupEndDocs) { this.grouper = grouper; this.groupEndDocs = groupEndDocs; } @@ -129,10 +123,10 @@ public TopGroups search(IndexSearcher searcher, Query query, int groupOff protected TopGroups groupByFieldOrFunction(IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException { int topN = groupOffset + groupLimit; - final FirstPassGroupingCollector firstPassCollector = grouper.getFirstPassCollector(groupSort, topN); - final AllGroupsCollector allGroupsCollector = allGroups ? grouper.getAllGroupsCollector() : null; + final FirstPassGroupingCollector firstPassCollector = new FirstPassGroupingCollector(grouper, groupSort, topN); + final AllGroupsCollector allGroupsCollector = allGroups ? new AllGroupsCollector(grouper) : null; final AllGroupHeadsCollector allGroupHeadsCollector - = allGroupHeads ? grouper.getGroupHeadsCollector(sortWithinGroup) : null; + = allGroupHeads ? AllGroupHeadsCollector.newCollector(grouper, sortWithinGroup) : null; final Collector firstRound = MultiCollector.wrap(firstPassCollector, allGroupsCollector, allGroupHeadsCollector); @@ -158,8 +152,8 @@ protected TopGroups groupByFieldOrFunction(IndexSearcher searcher, Query query, } int topNInsideGroup = groupDocsOffset + groupDocsLimit; - SecondPassGroupingCollector secondPassCollector - = grouper.getSecondPassCollector(topSearchGroups, groupSort, sortWithinGroup, topNInsideGroup, + TopGroupsCollector secondPassCollector + = new TopGroupsCollector(grouper, topSearchGroups, groupSort, sortWithinGroup, topNInsideGroup, includeScores, includeMaxScore, fillSortFields); if (cachedCollector != null && cachedCollector.isCached()) { diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java index f8feb756f540..c54c8eee4a81 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java @@ -18,152 +18,82 @@ import java.io.IOException; import java.util.Collection; -import java.util.HashMap; -import java.util.Map; import java.util.Objects; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SimpleCollector; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.search.TopFieldCollector; -import org.apache.lucene.search.TopScoreDocCollector; /** - * 
SecondPassGroupingCollector is the second of two passes - * necessary to collect grouped docs. This pass gathers the - * top N documents per top group computed from the - * first pass. Concrete subclasses define what a group is and how it - * is internally collected. + * SecondPassGroupingCollector runs over an already collected set of + * groups, further applying a {@link GroupReducer} to each group * - *

See {@link org.apache.lucene.search.grouping} for more - * details including a full code example.

+ * @see TopGroupsCollector + * @see DistinctValuesCollector * * @lucene.experimental */ -public abstract class SecondPassGroupingCollector extends SimpleCollector { +public class SecondPassGroupingCollector extends SimpleCollector { - private final Collection> groups; - private final Sort groupSort; - private final Sort withinGroupSort; - private final int maxDocsPerGroup; - private final boolean needsScores; - protected final Map> groupMap; + protected final GroupSelector groupSelector; + protected final Collection> groups; + protected final GroupReducer groupReducer; - protected SearchGroupDocs[] groupDocs; + protected int totalHitCount; + protected int totalGroupedHitCount; - private int totalHitCount; - private int totalGroupedHitCount; - - public SecondPassGroupingCollector(Collection> groups, Sort groupSort, Sort withinGroupSort, - int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) - throws IOException { + /** + * Create a new SecondPassGroupingCollector + * @param groupSelector the GroupSelector that defines groups for this search + * @param groups the groups to collect documents for + * @param reducer the reducer to apply to each group + */ + public SecondPassGroupingCollector(GroupSelector groupSelector, Collection> groups, GroupReducer reducer) { //System.out.println("SP init"); if (groups.isEmpty()) { throw new IllegalArgumentException("no groups to collect (groups is empty)"); } + this.groupSelector = Objects.requireNonNull(groupSelector); + this.groupSelector.setGroups(groups); + this.groups = Objects.requireNonNull(groups); - this.groupSort = Objects.requireNonNull(groupSort); - this.withinGroupSort = Objects.requireNonNull(withinGroupSort); - this.maxDocsPerGroup = maxDocsPerGroup; - this.needsScores = getScores || getMaxScores || withinGroupSort.needsScores(); + this.groupReducer = reducer; + reducer.setGroups(groups); + } - this.groupMap = new HashMap<>(groups.size()); - for (SearchGroup group : groups) { - //System.out.println(" prep group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString())); - final TopDocsCollector collector; - if (withinGroupSort.equals(Sort.RELEVANCE)) { // optimize to use TopScoreDocCollector - // Sort by score - collector = TopScoreDocCollector.create(maxDocsPerGroup); - } else { - // Sort by fields - collector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, fillSortFields, getScores, getMaxScores); - } - groupMap.put(group.groupValue, new SearchGroupDocs<>(group.groupValue, collector)); - } + /** + * @return the GroupSelector used in this collector + */ + public GroupSelector getGroupSelector() { + return groupSelector; } @Override public boolean needsScores() { - return needsScores; + return groupReducer.needsScores(); } @Override public void setScorer(Scorer scorer) throws IOException { - for (SearchGroupDocs group : groupMap.values()) { - group.leafCollector.setScorer(scorer); - } + groupReducer.setScorer(scorer); } @Override public void collect(int doc) throws IOException { totalHitCount++; - SearchGroupDocs group = retrieveGroup(doc); - if (group != null) { - totalGroupedHitCount++; - group.leafCollector.collect(doc); - } + if (groupSelector.advanceTo(doc) == GroupSelector.State.SKIP) + return; + totalGroupedHitCount++; + T value = groupSelector.currentValue(); + groupReducer.collect(value, doc); } - /** - * Returns the group the specified doc belongs to or null if no group could be retrieved. 
- * - * @param doc The specified doc - * @return the group the specified doc belongs to or null if no group could be retrieved - * @throws IOException If an I/O related error occurred - */ - protected abstract SearchGroupDocs retrieveGroup(int doc) throws IOException; - @Override protected void doSetNextReader(LeafReaderContext readerContext) throws IOException { - //System.out.println("SP.setNextReader"); - for (SearchGroupDocs group : groupMap.values()) { - group.leafCollector = group.collector.getLeafCollector(readerContext); - } - } - - public TopGroups getTopGroups(int withinGroupOffset) { - @SuppressWarnings({"unchecked","rawtypes"}) - final GroupDocs[] groupDocsResult = (GroupDocs[]) new GroupDocs[groups.size()]; - - int groupIDX = 0; - float maxScore = Float.MIN_VALUE; - for(SearchGroup group : groups) { - final SearchGroupDocs groupDocs = groupMap.get(group.groupValue); - final TopDocs topDocs = groupDocs.collector.topDocs(withinGroupOffset, maxDocsPerGroup); - groupDocsResult[groupIDX++] = new GroupDocs<>(Float.NaN, - topDocs.getMaxScore(), - topDocs.totalHits, - topDocs.scoreDocs, - groupDocs.groupValue, - group.sortValues); - maxScore = Math.max(maxScore, topDocs.getMaxScore()); - } - - return new TopGroups<>(groupSort.getSort(), - withinGroupSort.getSort(), - totalHitCount, totalGroupedHitCount, groupDocsResult, - maxScore); + groupReducer.setNextReader(readerContext); + groupSelector.setNextReader(readerContext); } - - // TODO: merge with SearchGroup or not? - // ad: don't need to build a new hashmap - // disad: blows up the size of SearchGroup if we need many of them, and couples implementations - public class SearchGroupDocs { - - public final T groupValue; - public final TopDocsCollector collector; - public LeafCollector leafCollector; - - public SearchGroupDocs(T groupValue, TopDocsCollector collector) { - this.groupValue = groupValue; - this.collector = collector; - } - } } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupFacetCollector.java similarity index 97% rename from lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java rename to lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupFacetCollector.java index cee327cbf473..39d28a579962 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupFacetCollector.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.lucene.search.grouping.term; +package org.apache.lucene.search.grouping; import java.io.IOException; import java.util.ArrayList; @@ -25,7 +25,6 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.grouping.GroupFacetCollector; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.SentinelIntSet; @@ -401,15 +400,15 @@ protected void nextTerm() throws IOException { } } } -} -class GroupedFacetHit { + private static class GroupedFacetHit { - final BytesRef groupValue; - final BytesRef facetValue; + final BytesRef groupValue; + final BytesRef facetValue; - GroupedFacetHit(BytesRef groupValue, BytesRef facetValue) { - this.groupValue = groupValue; - this.facetValue = facetValue; + GroupedFacetHit(BytesRef groupValue, BytesRef facetValue) { + this.groupValue = groupValue; + this.facetValue = facetValue; + } } } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupSelector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupSelector.java new file mode 100644 index 000000000000..5b8f77c2ce83 --- /dev/null +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TermGroupSelector.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.lucene.search.grouping; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefHash; + +/** + * A GroupSelector implementation that groups via SortedDocValues + */ +public class TermGroupSelector extends GroupSelector { + + private final String field; + private final BytesRefHash values = new BytesRefHash(); + private final Map ordsToGroupIds = new HashMap<>(); + + private SortedDocValues docValues; + private int groupId; + + private boolean secondPass; + private boolean includeEmpty; + + /** + * Create a new TermGroupSelector + * @param field the SortedDocValues field to use for grouping + */ + public TermGroupSelector(String field) { + this.field = field; + } + + @Override + public void setNextReader(LeafReaderContext readerContext) throws IOException { + this.docValues = DocValues.getSorted(readerContext.reader(), field); + this.ordsToGroupIds.clear(); + BytesRef scratch = new BytesRef(); + for (int i = 0; i < values.size(); i++) { + values.get(i, scratch); + int ord = this.docValues.lookupTerm(scratch); + if (ord >= 0) + ordsToGroupIds.put(ord, i); + } + } + + @Override + public State advanceTo(int doc) throws IOException { + if (this.docValues.advanceExact(doc) == false) { + groupId = -1; + return includeEmpty ? State.ACCEPT : State.SKIP; + } + int ord = docValues.ordValue(); + if (ordsToGroupIds.containsKey(ord)) { + groupId = ordsToGroupIds.get(ord); + return State.ACCEPT; + } + if (secondPass) + return State.SKIP; + groupId = values.add(docValues.binaryValue()); + ordsToGroupIds.put(ord, groupId); + return State.ACCEPT; + } + + private BytesRef scratch = new BytesRef(); + + @Override + public BytesRef currentValue() { + if (groupId == -1) + return null; + values.get(groupId, scratch); + return scratch; + } + + @Override + public BytesRef copyValue() { + if (groupId == -1) + return null; + return BytesRef.deepCopyOf(currentValue()); + } + + @Override + public void setGroups(Collection> searchGroups) { + this.values.clear(); + this.values.reinit(); + for (SearchGroup sg : searchGroups) { + if (sg.groupValue == null) + includeEmpty = true; + else + this.values.add(sg.groupValue); + } + this.secondPass = true; + } +} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroupsCollector.java new file mode 100644 index 000000000000..b6c71d37a8d2 --- /dev/null +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroupsCollector.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.lucene.search.grouping; + +import java.util.Collection; +import java.util.Objects; +import java.util.function.Supplier; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopDocsCollector; +import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopScoreDocCollector; + +/** + * A second-pass collector that collects the TopDocs for each group, and + * returns them as a {@link TopGroups} object + * + * @param the type of the group value + */ +public class TopGroupsCollector extends SecondPassGroupingCollector { + + private final Sort groupSort; + private final Sort withinGroupSort; + private final int maxDocsPerGroup; + + /** + * Create a new TopGroupsCollector + * @param groupSelector the group selector used to define groups + * @param groups the groups to collect TopDocs for + * @param groupSort the order in which groups are returned + * @param withinGroupSort the order in which documents are sorted in each group + * @param maxDocsPerGroup the maximum number of docs to collect for each group + * @param getScores if true, record the scores of all docs in each group + * @param getMaxScores if true, record the maximum score for each group + * @param fillSortFields if true, record the sort field values for all docs + */ + public TopGroupsCollector(GroupSelector groupSelector, Collection> groups, Sort groupSort, Sort withinGroupSort, + int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) { + super(groupSelector, groups, + new TopDocsReducer<>(withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields)); + this.groupSort = Objects.requireNonNull(groupSort); + this.withinGroupSort = Objects.requireNonNull(withinGroupSort); + this.maxDocsPerGroup = maxDocsPerGroup; + + } + + private static class TopDocsReducer extends GroupReducer> { + + private final Supplier> supplier; + private final boolean needsScores; + + TopDocsReducer(Sort withinGroupSort, + int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) { + this.needsScores = getScores || getMaxScores || withinGroupSort.needsScores(); + this.supplier = withinGroupSort == Sort.RELEVANCE ? 
+ () -> TopScoreDocCollector.create(maxDocsPerGroup) : + () -> TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, fillSortFields, getScores, getMaxScores); + } + + @Override + public boolean needsScores() { + return needsScores; + } + + @Override + protected TopDocsCollector newCollector() { + return supplier.get(); + } + } + + /** + * Get the TopGroups recorded by this collector + * @param withinGroupOffset the offset within each group to start collecting documents + */ + public TopGroups getTopGroups(int withinGroupOffset) { + @SuppressWarnings({"unchecked","rawtypes"}) + final GroupDocs[] groupDocsResult = (GroupDocs[]) new GroupDocs[groups.size()]; + + int groupIDX = 0; + float maxScore = Float.MIN_VALUE; + for(SearchGroup group : groups) { + TopDocsCollector collector = (TopDocsCollector) groupReducer.getCollector(group.groupValue); + final TopDocs topDocs = collector.topDocs(withinGroupOffset, maxDocsPerGroup); + groupDocsResult[groupIDX++] = new GroupDocs<>(Float.NaN, + topDocs.getMaxScore(), + topDocs.totalHits, + topDocs.scoreDocs, + group.groupValue, + group.sortValues); + maxScore = Math.max(maxScore, topDocs.getMaxScore()); + } + + return new TopGroups<>(groupSort.getSort(), + withinGroupSort.getSort(), + totalHitCount, totalGroupedHitCount, groupDocsResult, + maxScore); + } + + +} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/ValueSourceGroupSelector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/ValueSourceGroupSelector.java new file mode 100644 index 000000000000..249016018855 --- /dev/null +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/ValueSourceGroupSelector.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.lucene.search.grouping; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.queries.function.FunctionValues; +import org.apache.lucene.queries.function.ValueSource; +import org.apache.lucene.util.mutable.MutableValue; + +/** + * A GroupSelector that groups via a ValueSource + */ +public class ValueSourceGroupSelector extends GroupSelector { + + private final ValueSource valueSource; + private final Map context; + + private Set secondPassGroups; + + /** + * Create a new ValueSourceGroupSelector + * @param valueSource the ValueSource to group by + * @param context a context map for the ValueSource + */ + public ValueSourceGroupSelector(ValueSource valueSource, Map context) { + this.valueSource = valueSource; + this.context = context; + } + + private FunctionValues.ValueFiller filler; + + @Override + public void setNextReader(LeafReaderContext readerContext) throws IOException { + FunctionValues values = valueSource.getValues(context, readerContext); + this.filler = values.getValueFiller(); + } + + @Override + public State advanceTo(int doc) throws IOException { + this.filler.fillValue(doc); + if (secondPassGroups != null) { + if (secondPassGroups.contains(filler.getValue()) == false) + return State.SKIP; + } + return State.ACCEPT; + } + + @Override + public MutableValue currentValue() { + return filler.getValue(); + } + + @Override + public MutableValue copyValue() { + return filler.getValue().duplicate(); + } + + @Override + public void setGroups(Collection> searchGroups) { + secondPassGroups = new HashSet<>(); + for (SearchGroup group : searchGroups) { + secondPassGroups.add(group.groupValue); + } + } +} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java deleted file mode 100644 index f4d46682497a..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.lucene.search.grouping.function; - -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.queries.function.FunctionValues; -import org.apache.lucene.queries.function.ValueSource; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.grouping.AllGroupHeadsCollector; -import org.apache.lucene.util.mutable.MutableValue; - -/** - * An implementation of {@link AllGroupHeadsCollector} for retrieving the most relevant groups when grouping - * by {@link ValueSource}. - * - * @lucene.experimental - */ -public class FunctionAllGroupHeadsCollector extends AllGroupHeadsCollector { - - private final ValueSource groupBy; - private final Map vsContext; - private final Map groups; - private final Sort sortWithinGroup; - - private FunctionValues.ValueFiller filler; - private MutableValue mval; - private LeafReaderContext readerContext; - private Scorer scorer; - - /** - * Constructs a {@link FunctionAllGroupHeadsCollector} instance. - * - * @param groupBy The {@link ValueSource} to group by - * @param vsContext The ValueSource context - * @param sortWithinGroup The sort within a group - */ - public FunctionAllGroupHeadsCollector(ValueSource groupBy, Map vsContext, Sort sortWithinGroup) { - super(sortWithinGroup.getSort().length); - groups = new HashMap<>(); - this.sortWithinGroup = sortWithinGroup; - this.groupBy = groupBy; - this.vsContext = vsContext; - - final SortField[] sortFields = sortWithinGroup.getSort(); - for (int i = 0; i < sortFields.length; i++) { - reversed[i] = sortFields[i].getReverse() ? -1 : 1; - } - } - - @Override - protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException { - filler.fillValue(doc); - FunctionGroupHead groupHead = groups.get(mval); - if (groupHead == null) { - MutableValue groupValue = mval.duplicate(); - groupHead = new FunctionGroupHead(groupValue, sortWithinGroup, doc); - groups.put(groupValue, groupHead); - temporalResult.stop = true; - } else { - temporalResult.stop = false; - } - this.temporalResult.groupHead = groupHead; - } - - @Override - protected Collection getCollectedGroupHeads() { - return groups.values(); - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - for (FunctionGroupHead groupHead : groups.values()) { - for (LeafFieldComparator comparator : groupHead.leafComparators) { - comparator.setScorer(scorer); - } - } - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - this.readerContext = context; - FunctionValues values = groupBy.getValues(vsContext, context); - filler = values.getValueFiller(); - mval = filler.getValue(); - - for (FunctionGroupHead groupHead : groups.values()) { - for (int i = 0; i < groupHead.comparators.length; i++) { - groupHead.leafComparators[i] = groupHead.comparators[i].getLeafComparator(context); - } - } - } - - /** Holds current head document for a single group. 
- * - * @lucene.experimental */ - public class FunctionGroupHead extends AllGroupHeadsCollector.GroupHead { - - final FieldComparator[] comparators; - final LeafFieldComparator[] leafComparators; - - @SuppressWarnings({"unchecked","rawtypes"}) - private FunctionGroupHead(MutableValue groupValue, Sort sort, int doc) throws IOException { - super(groupValue, doc + readerContext.docBase); - final SortField[] sortFields = sort.getSort(); - comparators = new FieldComparator[sortFields.length]; - leafComparators = new LeafFieldComparator[sortFields.length]; - for (int i = 0; i < sortFields.length; i++) { - comparators[i] = sortFields[i].getComparator(1, i); - leafComparators[i] = comparators[i].getLeafComparator(readerContext); - leafComparators[i].setScorer(scorer); - leafComparators[i].copy(0, doc); - leafComparators[i].setBottom(0); - } - } - - @Override - public int compare(int compIDX, int doc) throws IOException { - return leafComparators[compIDX].compareBottom(doc); - } - - @Override - public void updateDocHead(int doc) throws IOException { - for (LeafFieldComparator comparator : leafComparators) { - comparator.copy(0, doc); - comparator.setBottom(0); - } - this.doc = doc + readerContext.docBase; - } - } - - @Override - public boolean needsScores() { - return sortWithinGroup.needsScores(); - } -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java deleted file mode 100644 index 1609d4d7bcff..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.search.grouping.function; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.queries.function.FunctionValues; -import org.apache.lucene.queries.function.ValueSource; -import org.apache.lucene.search.grouping.AllGroupsCollector; -import org.apache.lucene.util.mutable.MutableValue; - -import java.io.IOException; -import java.util.Collection; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; - -/** - * A collector that collects all groups that match the - * query. Only the group value is collected, and the order - * is undefined. This collector does not determine - * the most relevant document of a group. - *

- * Implementation detail: Uses {@link ValueSource} and {@link FunctionValues} to retrieve the - * field values to group by. - * - * @lucene.experimental - */ -public class FunctionAllGroupsCollector extends AllGroupsCollector { - - private final Map vsContext; - private final ValueSource groupBy; - private final SortedSet groups = new TreeSet<>(); - - private FunctionValues.ValueFiller filler; - private MutableValue mval; - - /** - * Constructs a {@link FunctionAllGroupsCollector} instance. - * - * @param groupBy The {@link ValueSource} to group by - * @param vsContext The ValueSource context - */ - public FunctionAllGroupsCollector(ValueSource groupBy, Map vsContext) { - this.vsContext = vsContext; - this.groupBy = groupBy; - } - - @Override - public Collection getGroups() { - return groups; - } - - @Override - public void collect(int doc) throws IOException { - filler.fillValue(doc); - if (!groups.contains(mval)) { - groups.add(mval.duplicate()); - } - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - FunctionValues values = groupBy.getValues(vsContext, context); - filler = values.getValueFiller(); - mval = filler.getValue(); - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java deleted file mode 100644 index 69ead076d6fc..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.search.grouping.function; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.queries.function.FunctionValues; -import org.apache.lucene.queries.function.ValueSource; -import org.apache.lucene.search.grouping.DistinctValuesCollector; -import org.apache.lucene.search.grouping.SearchGroup; -import org.apache.lucene.util.mutable.MutableValue; - -/** - * Function based implementation of {@link DistinctValuesCollector}. 
- * - * @lucene.experimental - */ -public class FunctionDistinctValuesCollector extends DistinctValuesCollector { - - private final Map vsContext; - private final ValueSource groupSource; - private final ValueSource countSource; - private final Map> groupMap; - - private FunctionValues.ValueFiller groupFiller; - private FunctionValues.ValueFiller countFiller; - private MutableValue groupMval; - private MutableValue countMval; - - public FunctionDistinctValuesCollector(Map vsContext, ValueSource groupSource, ValueSource countSource, Collection> groups) { - this.vsContext = vsContext; - this.groupSource = groupSource; - this.countSource = countSource; - groupMap = new LinkedHashMap<>(); - for (SearchGroup group : groups) { - groupMap.put(group.groupValue, new GroupCount<>(group.groupValue)); - } - } - - @Override - public List> getGroups() { - return new ArrayList<>(groupMap.values()); - } - - @Override - public void collect(int doc) throws IOException { - groupFiller.fillValue(doc); - GroupCount groupCount = groupMap.get(groupMval); - if (groupCount != null) { - countFiller.fillValue(doc); - groupCount.uniqueValues.add(countMval.duplicate()); - } - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - FunctionValues values = groupSource.getValues(vsContext, context); - groupFiller = values.getValueFiller(); - groupMval = groupFiller.getValue(); - values = countSource.getValues(vsContext, context); - countFiller = values.getValueFiller(); - countMval = countFiller.getValue(); - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java deleted file mode 100644 index 85376e6a066f..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.search.grouping.function; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.queries.function.FunctionValues; -import org.apache.lucene.queries.function.ValueSource; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.grouping.FirstPassGroupingCollector; -import org.apache.lucene.util.mutable.MutableValue; - -import java.io.IOException; -import java.util.Map; - -/** - * Concrete implementation of {@link FirstPassGroupingCollector} that groups based on - * {@link ValueSource} instances. 
- * - * @lucene.experimental - */ -public class FunctionFirstPassGroupingCollector extends FirstPassGroupingCollector { - - private final ValueSource groupByVS; - private final Map vsContext; - - private FunctionValues.ValueFiller filler; - private MutableValue mval; - - /** - * Creates a first pass collector. - * - * @param groupByVS The {@link ValueSource} instance to group by - * @param vsContext The ValueSource context - * @param groupSort The {@link Sort} used to sort the - * groups. The top sorted document within each group - * according to groupSort, determines how that group - * sorts against other groups. This must be non-null, - * ie, if you want to groupSort by relevance use - * Sort.RELEVANCE. - * @param topNGroups How many top groups to keep. - * @throws IOException When I/O related errors occur - */ - public FunctionFirstPassGroupingCollector(ValueSource groupByVS, Map vsContext, Sort groupSort, int topNGroups) throws IOException { - super(groupSort, topNGroups); - this.groupByVS = groupByVS; - this.vsContext = vsContext; - } - - @Override - protected MutableValue getDocGroupValue(int doc) throws IOException { - filler.fillValue(doc); - return mval; - } - - @Override - protected MutableValue copyDocGroupValue(MutableValue groupValue, MutableValue reuse) { - if (reuse != null) { - reuse.copy(groupValue); - return reuse; - } - return groupValue.duplicate(); - } - - @Override - protected void doSetNextReader(LeafReaderContext readerContext) throws IOException { - super.doSetNextReader(readerContext); - FunctionValues values = groupByVS.getValues(vsContext, readerContext); - filler = values.getValueFiller(); - mval = filler.getValue(); - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionGrouper.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionGrouper.java deleted file mode 100644 index 5204dc266570..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionGrouper.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.lucene.search.grouping.function; - -import java.io.IOException; -import java.util.Collection; -import java.util.Map; - -import org.apache.lucene.queries.function.ValueSource; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.grouping.AllGroupHeadsCollector; -import org.apache.lucene.search.grouping.AllGroupsCollector; -import org.apache.lucene.search.grouping.FirstPassGroupingCollector; -import org.apache.lucene.search.grouping.SecondPassGroupingCollector; -import org.apache.lucene.search.grouping.Grouper; -import org.apache.lucene.search.grouping.SearchGroup; -import org.apache.lucene.util.mutable.MutableValue; - -/** - * Collector factory for grouping by ValueSource - */ -public class FunctionGrouper extends Grouper { - - private final ValueSource valueSource; - private final Map context; - - /** - * Create a Grouper for the provided ValueSource and context - */ - public FunctionGrouper(ValueSource valueSource, Map context) { - this.valueSource = valueSource; - this.context = context; - } - - @Override - public FirstPassGroupingCollector getFirstPassCollector(Sort sort, int count) throws IOException { - return new FunctionFirstPassGroupingCollector(valueSource, context, sort, count); - } - - @Override - public AllGroupHeadsCollector getGroupHeadsCollector(Sort sort) { - return new FunctionAllGroupHeadsCollector(valueSource, context, sort); - } - - @Override - public AllGroupsCollector getAllGroupsCollector() { - return new FunctionAllGroupsCollector(valueSource, context); - } - - @Override - public SecondPassGroupingCollector getSecondPassCollector(Collection> searchGroups, Sort groupSort, Sort withinGroupSort, int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException { - return new FunctionSecondPassGroupingCollector(searchGroups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields, valueSource, context); - } -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java deleted file mode 100644 index 45f2b37887ec..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.lucene.search.grouping.function; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.queries.function.FunctionValues; -import org.apache.lucene.queries.function.ValueSource; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.grouping.SecondPassGroupingCollector; -import org.apache.lucene.search.grouping.SearchGroup; -import org.apache.lucene.util.mutable.MutableValue; -import org.apache.lucene.search.grouping.TopGroups; //javadoc - -import java.io.IOException; -import java.util.Collection; -import java.util.Map; - -/** - * Concrete implementation of {@link SecondPassGroupingCollector} that groups based on - * {@link ValueSource} instances. - * - * @lucene.experimental - */ -public class FunctionSecondPassGroupingCollector extends SecondPassGroupingCollector { - - private final ValueSource groupByVS; - private final Map vsContext; - - private FunctionValues.ValueFiller filler; - private MutableValue mval; - - /** - * Constructs a {@link FunctionSecondPassGroupingCollector} instance. - * - * @param searchGroups The {@link SearchGroup} instances collected during the first phase. - * @param groupSort The group sort - * @param withinGroupSort The sort inside a group - * @param maxDocsPerGroup The maximum number of documents to collect inside a group - * @param getScores Whether to include the scores - * @param getMaxScores Whether to include the maximum score - * @param fillSortFields Whether to fill the sort values in {@link TopGroups#withinGroupSort} - * @param groupByVS The {@link ValueSource} to group by - * @param vsContext The value source context - * @throws IOException IOException When I/O related errors occur - */ - public FunctionSecondPassGroupingCollector(Collection> searchGroups, Sort groupSort, Sort withinGroupSort, int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields, ValueSource groupByVS, Map vsContext) throws IOException { - super(searchGroups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); - this.groupByVS = groupByVS; - this.vsContext = vsContext; - } - - @Override - protected SearchGroupDocs retrieveGroup(int doc) throws IOException { - filler.fillValue(doc); - return groupMap.get(mval); - } - - @Override - protected void doSetNextReader(LeafReaderContext readerContext) throws IOException { - super.doSetNextReader(readerContext); - FunctionValues values = groupByVS.getValues(vsContext, readerContext); - filler = values.getValueFiller(); - mval = filler.getValue(); - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java index 824a98e31bf5..7e3745e61f28 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java @@ -62,9 +62,9 @@ * * *

The implementation is two-pass: the first pass ({@link - * org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector}) + * org.apache.lucene.search.grouping.FirstPassGroupingCollector}) * gathers the top groups, and the second pass ({@link - * org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector}) + * org.apache.lucene.search.grouping.SecondPassGroupingCollector}) * gathers documents within those groups. If the search is costly to * run you may want to use the {@link * org.apache.lucene.search.CachingCollector} class, which @@ -73,18 +73,17 @@ * hold all hits. Results are returned as a {@link * org.apache.lucene.search.grouping.TopGroups} instance.
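[Editor's illustration, not part of the patch.] To see how the refactored pieces fit together, a minimal hand-wired version of the two passes for a term-valued field; searcher, query, groupField, groupSort and withinGroupSort are assumed to already exist, the count arguments are arbitrary, and getTopGroups(int, boolean) on the first-pass collector is assumed to carry over unchanged from the pre-refactoring API:

    TermGroupSelector selector = new TermGroupSelector(groupField);

    // First pass: find the top 10 groups according to groupSort.
    FirstPassGroupingCollector<BytesRef> firstPass =
        new FirstPassGroupingCollector<>(selector, groupSort, 10);
    searcher.search(query, firstPass);
    Collection<SearchGroup<BytesRef>> topGroups = firstPass.getTopGroups(0, true);  // null if nothing matched

    // Second pass: reuse the same selector to gather the top 5 docs inside each group.
    TopGroupsCollector<BytesRef> secondPass =
        new TopGroupsCollector<>(selector, topGroups, groupSort, withinGroupSort,
                                 5, true, true, true);
    searcher.search(query, secondPass);
    TopGroups<BytesRef> result = secondPass.getTopGroups(0);

This is essentially what GroupingSearch.groupByFieldOrFunction now does internally, as shown earlier in this patch.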

* - *

- * This module abstracts away what defines group and how it is collected. All grouping collectors - * are abstract and have currently term based implementations. One can implement - * collectors that for example group on multiple fields. - *

+ *

+ * <p>Groups are defined by {@link org.apache.lucene.search.grouping.GroupSelector}
+ * implementations:
+ * <ul>
+ *   <li>{@link org.apache.lucene.search.grouping.TermGroupSelector} groups based on
+ *   the value of a {@link org.apache.lucene.index.SortedDocValues} field</li>
+ *   <li>{@link org.apache.lucene.search.grouping.ValueSourceGroupSelector} groups based on
+ *   the value of a {@link org.apache.lucene.queries.function.ValueSource}</li>
+ * </ul>
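Defining a new kind of group is now a matter of subclassing GroupSelector. The following is an editor's sketch only, not taken from the patch: a selector that buckets documents by a numeric doc-values field modulo 10. The class name, field handling and bucketing scheme are invented for the example, and it assumes the iterator-style NumericDocValues API (advanceExact) used elsewhere in this change:

    import java.io.IOException;
    import java.util.Collection;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.NumericDocValues;
    import org.apache.lucene.search.grouping.GroupSelector;
    import org.apache.lucene.search.grouping.SearchGroup;

    /** Groups documents into ten buckets derived from a numeric doc-values field. */
    public class ModuloGroupSelector extends GroupSelector<Long> {

      private final String field;
      private NumericDocValues values;
      private long current;
      private Set<Long> secondPassGroups;

      public ModuloGroupSelector(String field) {
        this.field = field;
      }

      @Override
      public void setNextReader(LeafReaderContext readerContext) throws IOException {
        values = DocValues.getNumeric(readerContext.reader(), field);
      }

      @Override
      public State advanceTo(int doc) throws IOException {
        if (values.advanceExact(doc) == false) {
          return State.SKIP;                               // no value for this document
        }
        current = Math.floorMod(values.longValue(), 10);
        if (secondPassGroups != null && secondPassGroups.contains(current) == false) {
          return State.SKIP;                               // second pass: not a requested group
        }
        return State.ACCEPT;
      }

      @Override
      public Long currentValue() {
        return current;
      }

      @Override
      public Long copyValue() {
        return current;                                    // Long is immutable, no defensive copy needed
      }

      @Override
      public void setGroups(Collection<SearchGroup<Long>> searchGroups) {
        secondPassGroups = new HashSet<>();
        for (SearchGroup<Long> group : searchGroups) {
          secondPassGroups.add(group.groupValue);
        }
      }
    }

Such a selector plugs into FirstPassGroupingCollector, AllGroupsCollector, AllGroupHeadsCollector.newCollector and TopGroupsCollector in exactly the same way as the two built-in selectors listed above.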
* *

Known limitations:

*
    - *
  • For the two-pass grouping search, the group field must be a - * indexed as a {@link org.apache.lucene.document.SortedDocValuesField}). - *
  • Although Solr support grouping by function and this module has abstraction of what a group is, there are currently only - * implementations for grouping based on terms. *
  • Sharding is not directly supported, though is not too * difficult, if you can merge the top groups and top documents per * group yourself. @@ -174,14 +173,15 @@ * have to separately retrieve it (for example using stored * fields, FieldCache, etc.). * - *

    Another collector is the TermAllGroupHeadsCollector that can be used to retrieve all most relevant + *

    Another collector is the AllGroupHeadsCollector that can be used to retrieve all most relevant * documents per group. Also known as group heads. This can be useful in situations when one wants to compute group * based facets / statistics on the complete query result. The collector can be executed during the first or second * phase. This collector can also be used with the GroupingSearch convenience utility, but when if one only * wants to compute the most relevant documents per group it is better to just use the collector as done here below.

    * *
    - *   AbstractAllGroupHeadsCollector c = TermAllGroupHeadsCollector.create(groupField, sortWithinGroup);
    + *   TermGroupSelector grouper = new TermGroupSelector(groupField);
    + *   AllGroupHeadsCollector c = AllGroupHeadsCollector.newCollector(grouper, sortWithinGroup);
      *   s.search(new TermQuery(new Term("content", searchTerm)), c);
      *   // Return all group heads as int array
      *   int[] groupHeadsArray = c.retrieveGroupHeads()
    @@ -189,12 +189,6 @@
      *   int maxDoc = s.maxDoc();
      *   FixedBitSet groupHeadsBitSet = c.retrieveGroupHeads(maxDoc)
      * 
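The function-based collector variants described in the removed paragraph below are replaced by ValueSourceGroupSelector, which plugs into the very same collectors. A brief editor's sketch, not part of the patch; the "popularity" field and org.apache.lucene.queries.function.valuesource.LongFieldSource stand in for any ValueSource, and searcher and query are assumed to exist:

    // Group by an arbitrary function, here simply the value of a numeric field.
    ValueSourceGroupSelector selector =
        new ValueSourceGroupSelector(new LongFieldSource("popularity"), new HashMap<>());

    // Or let the GroupingSearch convenience class do the wiring.
    GroupingSearch grouping = new GroupingSearch(new LongFieldSource("popularity"), new HashMap<>());
    TopGroups<MutableValue> hits = grouping.search(searcher, query, 0, 10);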
    - * - *

    For each of the above collector types there is also a variant that works with ValueSource instead of - * of fields. Concretely this means that these variants can work with functions. These variants are slower than - * there term based counter parts. These implementations are located in the - * org.apache.lucene.search.grouping.function package, but can also be used with the - * GroupingSearch convenience utility - *

    + * */ package org.apache.lucene.search.grouping; diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java deleted file mode 100644 index 54e23993e586..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java +++ /dev/null @@ -1,767 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.search.grouping.term; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.grouping.AllGroupHeadsCollector; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.SentinelIntSet; - -/** - * A base implementation of {@link AllGroupHeadsCollector} for retrieving the most relevant groups when grouping - * on a string based group field. More specifically this all concrete implementations of this base implementation - * use {@link SortedDocValues}. - * - * @lucene.experimental - */ -public abstract class TermAllGroupHeadsCollector extends AllGroupHeadsCollector { - - private static final int DEFAULT_INITIAL_SIZE = 128; - - final String groupField; - - SortedDocValues groupIndex; - LeafReaderContext readerContext; - - protected TermAllGroupHeadsCollector(String groupField, int numberOfSorts) { - super(numberOfSorts); - this.groupField = groupField; - } - - /** - * Creates an AbstractAllGroupHeadsCollector instance based on the supplied arguments. - * This factory method decides with implementation is best suited. - * - * Delegates to {@link #create(String, org.apache.lucene.search.Sort, int)} with an initialSize of 128. - * - * @param groupField The field to group by - * @param sortWithinGroup The sort within each group - * @return an AbstractAllGroupHeadsCollector instance based on the supplied arguments - */ - public static AllGroupHeadsCollector create(String groupField, Sort sortWithinGroup) { - return create(groupField, sortWithinGroup, DEFAULT_INITIAL_SIZE); - } - - /** - * Creates an AbstractAllGroupHeadsCollector instance based on the supplied arguments. - * This factory method decides with implementation is best suited. 
- * - * @param groupField The field to group by - * @param sortWithinGroup The sort within each group - * @param initialSize The initial allocation size of the internal int set and group list which should roughly match - * the total number of expected unique groups. Be aware that the heap usage is - * 4 bytes * initialSize. - * @return an AbstractAllGroupHeadsCollector instance based on the supplied arguments - */ - public static AllGroupHeadsCollector create(String groupField, Sort sortWithinGroup, int initialSize) { - boolean sortAllScore = true; - boolean sortAllFieldValue = true; - - for (SortField sortField : sortWithinGroup.getSort()) { - if (sortField.getType() == SortField.Type.SCORE) { - sortAllFieldValue = false; - } else if (needGeneralImpl(sortField)) { - return new GeneralAllGroupHeadsCollector(groupField, sortWithinGroup); - } else { - sortAllScore = false; - } - } - - if (sortAllScore) { - return new ScoreAllGroupHeadsCollector(groupField, sortWithinGroup, initialSize); - } else if (sortAllFieldValue) { - return new OrdAllGroupHeadsCollector(groupField, sortWithinGroup, initialSize); - } else { - return new OrdScoreAllGroupHeadsCollector(groupField, sortWithinGroup, initialSize); - } - } - - // Returns when a sort field needs the general impl. - private static boolean needGeneralImpl(SortField sortField) { - SortField.Type sortType = sortField.getType(); - // Note (MvG): We can also make an optimized impl when sorting is SortField.DOC - return sortType != SortField.Type.STRING_VAL && sortType != SortField.Type.STRING && sortType != SortField.Type.SCORE; - } - - // A general impl that works for any group sort. - static class GeneralAllGroupHeadsCollector extends TermAllGroupHeadsCollector { - - private final Sort sortWithinGroup; - private final Map groups; - - Scorer scorer; - - GeneralAllGroupHeadsCollector(String groupField, Sort sortWithinGroup) { - super(groupField, sortWithinGroup.getSort().length); - this.sortWithinGroup = sortWithinGroup; - groups = new HashMap<>(); - - final SortField[] sortFields = sortWithinGroup.getSort(); - for (int i = 0; i < sortFields.length; i++) { - reversed[i] = sortFields[i].getReverse() ? -1 : 1; - } - } - - protected int getOrdForDoc(int doc) throws IOException { - int curDocID = groupIndex.docID(); - if (curDocID < doc) { - curDocID = groupIndex.advance(doc); - } - - if (curDocID == doc) { - return groupIndex.ordValue(); - } else { - return -1; - } - } - - @Override - protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException { - int ord = getOrdForDoc(doc); - - BytesRef groupValue; - if (ord == -1) { - groupValue = null; - } else { - groupValue = groupIndex.lookupOrd(ord); - } - - GroupHead groupHead = groups.get(groupValue); - if (groupHead == null) { - groupValue = groupValue == null ? 
null : BytesRef.deepCopyOf(groupValue); - groupHead = new GroupHead(groupValue, sortWithinGroup, doc); - groups.put(groupValue, groupHead); - temporalResult.stop = true; - } else { - temporalResult.stop = false; - } - temporalResult.groupHead = groupHead; - } - - @Override - protected Collection getCollectedGroupHeads() { - return groups.values(); - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - this.readerContext = context; - groupIndex = DocValues.getSorted(context.reader(), groupField); - - for (GroupHead groupHead : groups.values()) { - for (int i = 0; i < groupHead.comparators.length; i++) { - groupHead.leafComparators[i] = groupHead.comparators[i].getLeafComparator(context); - } - } - } - - @Override - public boolean needsScores() { - return sortWithinGroup.needsScores(); - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - for (GroupHead groupHead : groups.values()) { - for (LeafFieldComparator comparator : groupHead.leafComparators) { - comparator.setScorer(scorer); - } - } - } - - class GroupHead extends AllGroupHeadsCollector.GroupHead { - - @SuppressWarnings({"unchecked", "rawtypes"}) - final FieldComparator[] comparators; - - final LeafFieldComparator[] leafComparators; - - @SuppressWarnings({"unchecked", "rawtypes"}) - GroupHead(BytesRef groupValue, Sort sort, int doc) throws IOException { - super(groupValue, doc + readerContext.docBase); - final SortField[] sortFields = sort.getSort(); - comparators = new FieldComparator[sortFields.length]; - leafComparators = new LeafFieldComparator[sortFields.length]; - for (int i = 0; i < sortFields.length; i++) { - comparators[i] = sortFields[i].getComparator(1, i); - leafComparators[i] = comparators[i].getLeafComparator(readerContext); - leafComparators[i].setScorer(scorer); - leafComparators[i].copy(0, doc); - leafComparators[i].setBottom(0); - } - } - - @Override - public int compare(int compIDX, int doc) throws IOException { - return leafComparators[compIDX].compareBottom(doc); - } - - @Override - public void updateDocHead(int doc) throws IOException { - for (LeafFieldComparator comparator : leafComparators) { - comparator.copy(0, doc); - comparator.setBottom(0); - } - this.doc = doc + readerContext.docBase; - } - } - } - - - // AbstractAllGroupHeadsCollector optimized for ord fields and scores. - static class OrdScoreAllGroupHeadsCollector extends TermAllGroupHeadsCollector { - - private final SentinelIntSet ordSet; - private final List collectedGroups; - final SortField[] fields; - - SortedDocValues[] sortsIndex; - Scorer scorer; - private GroupHead[] segmentGroupHeads; - - OrdScoreAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) { - super(groupField, sortWithinGroup.getSort().length); - ordSet = new SentinelIntSet(initialSize, -2); - collectedGroups = new ArrayList<>(initialSize); - - final SortField[] sortFields = sortWithinGroup.getSort(); - fields = new SortField[sortFields.length]; - sortsIndex = new SortedDocValues[sortFields.length]; - for (int i = 0; i < sortFields.length; i++) { - reversed[i] = sortFields[i].getReverse() ? 
-1 : 1; - fields[i] = sortFields[i]; - } - } - - @Override - protected Collection getCollectedGroupHeads() { - return collectedGroups; - } - - @Override - public boolean needsScores() { - return true; - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - } - - private int getOrdForDoc(int doc) throws IOException { - int curDocID = groupIndex.docID(); - if (curDocID < doc) { - curDocID = groupIndex.advance(doc); - } - - if (curDocID == doc) { - return groupIndex.ordValue(); - } else { - return -1; - } - } - - @Override - protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException { - int key = getOrdForDoc(doc); - GroupHead groupHead; - if (!ordSet.exists(key)) { - ordSet.put(key); - final BytesRef term; - if (key == -1) { - term = null; - } else { - term = BytesRef.deepCopyOf(groupIndex.lookupOrd(key)); - } - groupHead = new GroupHead(doc, term); - collectedGroups.add(groupHead); - segmentGroupHeads[key+1] = groupHead; - temporalResult.stop = true; - } else { - temporalResult.stop = false; - groupHead = segmentGroupHeads[key+1]; - } - temporalResult.groupHead = groupHead; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - this.readerContext = context; - groupIndex = DocValues.getSorted(context.reader(), groupField); - for (int i = 0; i < fields.length; i++) { - if (fields[i].getType() == SortField.Type.SCORE) { - continue; - } - - sortsIndex[i] = DocValues.getSorted(context.reader(), fields[i].getField()); - } - - // Clear ordSet and fill it with previous encountered groups that can occur in the current segment. - ordSet.clear(); - segmentGroupHeads = new GroupHead[groupIndex.getValueCount()+1]; - for (GroupHead collectedGroup : collectedGroups) { - int ord; - if (collectedGroup.groupValue == null) { - ord = -1; - } else { - ord = groupIndex.lookupTerm(collectedGroup.groupValue); - } - if (collectedGroup.groupValue == null || ord >= 0) { - ordSet.put(ord); - segmentGroupHeads[ord+1] = collectedGroup; - - for (int i = 0; i < sortsIndex.length; i++) { - if (fields[i].getType() == SortField.Type.SCORE) { - continue; - } - int sortOrd; - if (collectedGroup.sortValues[i] == null) { - sortOrd = -1; - } else { - sortOrd = sortsIndex[i].lookupTerm(collectedGroup.sortValues[i].get()); - } - collectedGroup.sortOrds[i] = sortOrd; - } - } - } - } - - void setDoc(int docID) throws IOException { - for (int i = 0; i < sortsIndex.length; i++) { - SortedDocValues values = sortsIndex[i]; - if (values != null && docID > values.docID()) { - values.advance(docID); - } - } - } - - class GroupHead extends AllGroupHeadsCollector.GroupHead { - - BytesRefBuilder[] sortValues; - int[] sortOrds; - float[] scores; - - GroupHead(int doc, BytesRef groupValue) throws IOException { - super(groupValue, doc + readerContext.docBase); - sortValues = new BytesRefBuilder[sortsIndex.length]; - sortOrds = new int[sortsIndex.length]; - scores = new float[sortsIndex.length]; - setDoc(doc); - for (int i = 0; i < sortsIndex.length; i++) { - if (fields[i].getType() == SortField.Type.SCORE) { - scores[i] = scorer.score(); - } else { - if (doc == sortsIndex[i].docID()) { - sortOrds[i] = sortsIndex[i].ordValue(); - } else { - sortOrds[i] = -1; - } - sortValues[i] = new BytesRefBuilder(); - if (sortOrds[i] != -1) { - sortValues[i].copyBytes(sortsIndex[i].binaryValue()); - } - } - } - } - - @Override - public int compare(int compIDX, int doc) throws IOException { - if (fields[compIDX].getType() == SortField.Type.SCORE) { - 
float score = scorer.score(); - if (scores[compIDX] < score) { - return 1; - } else if (scores[compIDX] > score) { - return -1; - } - return 0; - } else { - if (sortsIndex[compIDX].docID() < doc) { - sortsIndex[compIDX].advance(doc); - } - if (sortOrds[compIDX] < 0) { - // The current segment doesn't contain the sort value we encountered before. Therefore the ord is negative. - final BytesRef term; - if (sortsIndex[compIDX].docID() == doc) { - term = sortsIndex[compIDX].binaryValue(); - } else { - term = new BytesRef(BytesRef.EMPTY_BYTES); - } - return sortValues[compIDX].get().compareTo(term); - } else { - int ord; - if (sortsIndex[compIDX].docID() == doc) { - ord = sortsIndex[compIDX].ordValue(); - } else { - ord = -1; - } - return sortOrds[compIDX] - ord; - } - } - } - - @Override - public void updateDocHead(int doc) throws IOException { - setDoc(doc); - for (int i = 0; i < sortsIndex.length; i++) { - if (fields[i].getType() == SortField.Type.SCORE) { - scores[i] = scorer.score(); - } else { - if (sortsIndex[i].docID() == doc) { - sortOrds[i] = sortsIndex[i].ordValue(); - sortValues[i].copyBytes(sortsIndex[i].binaryValue()); - } else { - sortOrds[i] = -1; - sortValues[i].clear(); - } - } - } - this.doc = doc + readerContext.docBase; - } - } - } - - - // AbstractAllGroupHeadsCollector optimized for ord fields. - static class OrdAllGroupHeadsCollector extends TermAllGroupHeadsCollector { - - private final SentinelIntSet ordSet; - private final List collectedGroups; - private final SortField[] fields; - - SortedDocValues[] sortsIndex; - GroupHead[] segmentGroupHeads; - - OrdAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) { - super(groupField, sortWithinGroup.getSort().length); - ordSet = new SentinelIntSet(initialSize, -2); - collectedGroups = new ArrayList<>(initialSize); - - final SortField[] sortFields = sortWithinGroup.getSort(); - fields = new SortField[sortFields.length]; - sortsIndex = new SortedDocValues[sortFields.length]; - for (int i = 0; i < sortFields.length; i++) { - reversed[i] = sortFields[i].getReverse() ? -1 : 1; - fields[i] = sortFields[i]; - } - } - - @Override - protected Collection getCollectedGroupHeads() { - return collectedGroups; - } - - @Override - public boolean needsScores() { - return false; - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - } - - @Override - protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException { - if (doc > groupIndex.docID()) { - groupIndex.advance(doc); - } - - int key; - if (doc == groupIndex.docID()) { - key = groupIndex.ordValue(); - } else { - key = -1; - } - - GroupHead groupHead; - if (!ordSet.exists(key)) { - ordSet.put(key); - final BytesRef term; - if (key == -1) { - term = null; - } else { - term = BytesRef.deepCopyOf(groupIndex.lookupOrd(key)); - } - groupHead = new GroupHead(doc, term); - collectedGroups.add(groupHead); - segmentGroupHeads[key+1] = groupHead; - temporalResult.stop = true; - } else { - temporalResult.stop = false; - groupHead = segmentGroupHeads[key+1]; - } - temporalResult.groupHead = groupHead; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - this.readerContext = context; - groupIndex = DocValues.getSorted(context.reader(), groupField); - for (int i = 0; i < fields.length; i++) { - sortsIndex[i] = DocValues.getSorted(context.reader(), fields[i].getField()); - } - - // Clear ordSet and fill it with previous encountered groups that can occur in the current segment. 
- ordSet.clear(); - segmentGroupHeads = new GroupHead[groupIndex.getValueCount()+1]; - for (GroupHead collectedGroup : collectedGroups) { - int groupOrd; - if (collectedGroup.groupValue == null) { - groupOrd = -1; - } else { - groupOrd = groupIndex.lookupTerm(collectedGroup.groupValue); - } - if (collectedGroup.groupValue == null || groupOrd >= 0) { - ordSet.put(groupOrd); - segmentGroupHeads[groupOrd+1] = collectedGroup; - - for (int i = 0; i < sortsIndex.length; i++) { - int sortOrd; - if (collectedGroup.sortOrds[i] == -1) { - sortOrd = -1; - } else { - sortOrd = sortsIndex[i].lookupTerm(collectedGroup.sortValues[i].get()); - } - collectedGroup.sortOrds[i] = sortOrd; - } - } - } - } - - void setDoc(int docID) throws IOException { - for (int i = 0; i < sortsIndex.length; i++) { - SortedDocValues values = sortsIndex[i]; - if (docID > values.docID()) { - values.advance(docID); - } - } - } - - class GroupHead extends AllGroupHeadsCollector.GroupHead { - - BytesRefBuilder[] sortValues; - int[] sortOrds; - - GroupHead(int doc, BytesRef groupValue) throws IOException { - super(groupValue, doc + readerContext.docBase); - sortValues = new BytesRefBuilder[sortsIndex.length]; - sortOrds = new int[sortsIndex.length]; - setDoc(doc); - for (int i = 0; i < sortsIndex.length; i++) { - if (doc == sortsIndex[i].docID()) { - sortOrds[i] = sortsIndex[i].ordValue(); - } else { - sortOrds[i] = -1; - } - sortValues[i] = new BytesRefBuilder(); - if (sortOrds[i] != -1) { - sortValues[i].copyBytes(sortsIndex[i].binaryValue()); - } - } - } - - @Override - public int compare(int compIDX, int doc) throws IOException { - if (sortsIndex[compIDX].docID() < doc) { - sortsIndex[compIDX].advance(doc); - } - if (sortOrds[compIDX] < 0) { - // The current segment doesn't contain the sort value we encountered before. Therefore the ord is negative. - final BytesRef term; - if (sortsIndex[compIDX].docID() == doc) { - term = sortsIndex[compIDX].binaryValue(); - } else { - term = new BytesRef(BytesRef.EMPTY_BYTES); - } - return sortValues[compIDX].get().compareTo(term); - } else { - int ord; - if (sortsIndex[compIDX].docID() == doc) { - ord = sortsIndex[compIDX].ordValue(); - } else { - ord = -1; - } - return sortOrds[compIDX] - ord; - } - } - - @Override - public void updateDocHead(int doc) throws IOException { - setDoc(doc); - for (int i = 0; i < sortsIndex.length; i++) { - if (sortsIndex[i].docID() == doc) { - sortOrds[i] = sortsIndex[i].ordValue(); - sortValues[i].copyBytes(sortsIndex[i].binaryValue()); - } else { - sortOrds[i] = -1; - sortValues[i].clear(); - } - } - this.doc = doc + readerContext.docBase; - } - - } - - } - - - // AbstractAllGroupHeadsCollector optimized for scores. - static class ScoreAllGroupHeadsCollector extends TermAllGroupHeadsCollector { - - final SentinelIntSet ordSet; - final List collectedGroups; - final SortField[] fields; - - Scorer scorer; - GroupHead[] segmentGroupHeads; - - ScoreAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) { - super(groupField, sortWithinGroup.getSort().length); - ordSet = new SentinelIntSet(initialSize, -2); - collectedGroups = new ArrayList<>(initialSize); - - final SortField[] sortFields = sortWithinGroup.getSort(); - fields = new SortField[sortFields.length]; - for (int i = 0; i < sortFields.length; i++) { - reversed[i] = sortFields[i].getReverse() ? 
-1 : 1; - fields[i] = sortFields[i]; - } - } - - @Override - protected Collection getCollectedGroupHeads() { - return collectedGroups; - } - - @Override - public boolean needsScores() { - return true; - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - } - - @Override - protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException { - if (doc > groupIndex.docID()) { - groupIndex.advance(doc); - } - - int key; - if (doc == groupIndex.docID()) { - key = groupIndex.ordValue(); - } else { - key = -1; - } - - GroupHead groupHead; - if (!ordSet.exists(key)) { - ordSet.put(key); - final BytesRef term; - if (key == -1) { - term = null; - } else { - term = BytesRef.deepCopyOf(groupIndex.lookupOrd(key)); - } - groupHead = new GroupHead(doc, term); - collectedGroups.add(groupHead); - segmentGroupHeads[key+1] = groupHead; - temporalResult.stop = true; - } else { - temporalResult.stop = false; - groupHead = segmentGroupHeads[key+1]; - } - temporalResult.groupHead = groupHead; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - this.readerContext = context; - groupIndex = DocValues.getSorted(context.reader(), groupField); - - // Clear ordSet and fill it with previous encountered groups that can occur in the current segment. - ordSet.clear(); - segmentGroupHeads = new GroupHead[groupIndex.getValueCount()+1]; - for (GroupHead collectedGroup : collectedGroups) { - int ord; - if (collectedGroup.groupValue == null) { - ord = -1; - } else { - ord = groupIndex.lookupTerm(collectedGroup.groupValue); - } - if (collectedGroup.groupValue == null || ord >= 0) { - ordSet.put(ord); - segmentGroupHeads[ord+1] = collectedGroup; - } - } - } - - class GroupHead extends AllGroupHeadsCollector.GroupHead { - - float[] scores; - - GroupHead(int doc, BytesRef groupValue) throws IOException { - super(groupValue, doc + readerContext.docBase); - scores = new float[fields.length]; - float score = scorer.score(); - for (int i = 0; i < scores.length; i++) { - scores[i] = score; - } - } - - @Override - public int compare(int compIDX, int doc) throws IOException { - float score = scorer.score(); - if (scores[compIDX] < score) { - return 1; - } else if (scores[compIDX] > score) { - return -1; - } - return 0; - } - - @Override - public void updateDocHead(int doc) throws IOException { - float score = scorer.score(); - for (int i = 0; i < scores.length; i++) { - scores[i] = score; - } - this.doc = doc + readerContext.docBase; - } - - } - - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java deleted file mode 100644 index 125555a0940b..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.search.grouping.term; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.grouping.AllGroupsCollector; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.SentinelIntSet; - -/** - * A collector that collects all groups that match the - * query. Only the group value is collected, and the order - * is undefined. This collector does not determine - * the most relevant document of a group. - *
    - * Implementation detail: an int hash set (SentinelIntSet) - * is used to detect if a group is already added to the - * total count. For each segment the int set is cleared and filled - * with previous counted groups that occur in the new - * segment. - * - * @lucene.experimental - */ -public class TermAllGroupsCollector extends AllGroupsCollector { - - private static final int DEFAULT_INITIAL_SIZE = 128; - - private final String groupField; - private final SentinelIntSet ordSet; - private final List groups; - - private SortedDocValues index; - - /** - * Expert: Constructs a {@link AllGroupsCollector} - * - * @param groupField The field to group by - * @param initialSize The initial allocation size of the - * internal int set and group list - * which should roughly match the total - * number of expected unique groups. Be aware that the - * heap usage is 4 bytes * initialSize. - */ - public TermAllGroupsCollector(String groupField, int initialSize) { - ordSet = new SentinelIntSet(initialSize, -2); - groups = new ArrayList<>(initialSize); - this.groupField = groupField; - } - - /** - * Constructs a {@link AllGroupsCollector}. This sets the - * initial allocation size for the internal int set and group - * list to 128. - * - * @param groupField The field to group by - */ - public TermAllGroupsCollector(String groupField) { - this(groupField, DEFAULT_INITIAL_SIZE); - } - - @Override - public void collect(int doc) throws IOException { - if (doc > index.docID()) { - index.advance(doc); - } - int key; - if (doc == index.docID()) { - key = index.ordValue(); - } else { - key = -1; - } - if (!ordSet.exists(key)) { - ordSet.put(key); - final BytesRef term; - if (key == -1) { - term = null; - } else { - term = BytesRef.deepCopyOf(index.lookupOrd(key)); - } - groups.add(term); - } - } - - @Override - public Collection getGroups() { - return groups; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - index = DocValues.getSorted(context.reader(), groupField); - - // Clear ordSet and fill it with previous encountered groups that can occur in the current segment. - ordSet.clear(); - for (BytesRef countedGroup : groups) { - if (countedGroup == null) { - ordSet.put(-1); - } else { - int ord = index.lookupTerm(countedGroup); - if (ord >= 0) { - ordSet.put(ord); - } - } - } - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java deleted file mode 100644 index e5356a311b46..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.search.grouping.term; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.grouping.DistinctValuesCollector; -import org.apache.lucene.search.grouping.SearchGroup; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.SentinelIntSet; - -/** - * A term based implementation of {@link DistinctValuesCollector} that relies - * on {@link SortedDocValues} to count the distinct values per group. - * - * @lucene.experimental - */ -public class TermDistinctValuesCollector extends DistinctValuesCollector { - - private final String groupField; - private final String countField; - private final List groups; - private final SentinelIntSet ordSet; - private final TermGroupCount groupCounts[]; - - private SortedDocValues groupFieldTermIndex; - private SortedDocValues countFieldTermIndex; - - /** - * Constructs {@link TermDistinctValuesCollector} instance. - * - * @param groupField The field to group by - * @param countField The field to count distinct values for - * @param groups The top N groups, collected during the first phase search - */ - public TermDistinctValuesCollector(String groupField, String countField, Collection> groups) { - this.groupField = groupField; - this.countField = countField; - this.groups = new ArrayList<>(groups.size()); - for (SearchGroup group : groups) { - this.groups.add(new TermGroupCount(group.groupValue)); - } - ordSet = new SentinelIntSet(groups.size(), -2); - groupCounts = new TermGroupCount[ordSet.keys.length]; - } - - @Override - public void collect(int doc) throws IOException { - if (doc > groupFieldTermIndex.docID()) { - groupFieldTermIndex.advance(doc); - } - int ord; - if (doc == groupFieldTermIndex.docID()) { - ord = groupFieldTermIndex.ordValue(); - } else { - ord = -1; - } - int slot = ordSet.find(ord); - if (slot < 0) { - return; - } - - TermGroupCount gc = groupCounts[slot]; - if (doc > countFieldTermIndex.docID()) { - countFieldTermIndex.advance(doc); - } - - int countOrd; - if (doc == countFieldTermIndex.docID()) { - countOrd = countFieldTermIndex.ordValue(); - } else { - countOrd = -1; - } - - if (doesNotContainOrd(countOrd, gc.ords)) { - if (countOrd == -1) { - gc.uniqueValues.add(null); - } else { - BytesRef term = BytesRef.deepCopyOf(countFieldTermIndex.lookupOrd(countOrd)); - gc.uniqueValues.add(term); - } - - gc.ords = Arrays.copyOf(gc.ords, gc.ords.length + 1); - gc.ords[gc.ords.length - 1] = countOrd; - if (gc.ords.length > 1) { - Arrays.sort(gc.ords); - } - } - } - - private boolean doesNotContainOrd(int ord, int[] ords) { - if (ords.length == 0) { - return true; - } else if (ords.length == 1) { - return ord != ords[0]; - } - return Arrays.binarySearch(ords, ord) < 0; - } - - @Override - public List> getGroups() { - return new ArrayList<>(groups); - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - groupFieldTermIndex = DocValues.getSorted(context.reader(), groupField); - countFieldTermIndex = DocValues.getSorted(context.reader(), countField); - ordSet.clear(); - for (TermGroupCount group : groups) { - int groupOrd = group.groupValue == null ? 
-1 : groupFieldTermIndex.lookupTerm(group.groupValue); - if (group.groupValue != null && groupOrd < 0) { - continue; - } - - groupCounts[ordSet.put(groupOrd)] = group; - group.ords = new int[group.uniqueValues.size()]; - Arrays.fill(group.ords, -2); - int i = 0; - for (BytesRef value : group.uniqueValues) { - int countOrd = value == null ? -1 : countFieldTermIndex.lookupTerm(value); - if (value == null || countOrd >= 0) { - group.ords[i++] = countOrd; - } - } - } - } - - /** Holds distinct values for a single group. - * - * @lucene.experimental */ - public static class TermGroupCount extends DistinctValuesCollector.GroupCount { - - int[] ords; - - TermGroupCount(BytesRef groupValue) { - super(groupValue); - } - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java deleted file mode 100644 index 3c35fa8b22c8..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.search.grouping.term; - -import java.io.IOException; - -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.grouping.FirstPassGroupingCollector; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; - -/** - * Concrete implementation of {@link FirstPassGroupingCollector} that groups based on - * field values and more specifically uses {@link SortedDocValues} - * to collect groups. - * - * @lucene.experimental - */ -public class TermFirstPassGroupingCollector extends FirstPassGroupingCollector { - - private SortedDocValues index; - - private String groupField; - - /** - * Create the first pass collector. - * - * @param groupField The field used to group - * documents. This field must be single-valued and - * indexed (DocValues is used to access its value - * per-document). - * @param groupSort The {@link Sort} used to sort the - * groups. The top sorted document within each group - * according to groupSort, determines how that group - * sorts against other groups. This must be non-null, - * ie, if you want to groupSort by relevance use - * Sort.RELEVANCE. - * @param topNGroups How many top groups to keep. 
- * @throws IOException When I/O related errors occur - */ - public TermFirstPassGroupingCollector(String groupField, Sort groupSort, int topNGroups) throws IOException { - super(groupSort, topNGroups); - this.groupField = groupField; - } - - @Override - protected BytesRef getDocGroupValue(int doc) throws IOException { - if (doc > index.docID()) { - index.advance(doc); - } - if (doc == index.docID()) { - return index.binaryValue(); - } else { - return null; - } - } - - @Override - protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) { - if (groupValue == null) { - return null; - } else if (reuse != null) { - reuse.bytes = ArrayUtil.grow(reuse.bytes, groupValue.length); - reuse.offset = 0; - reuse.length = groupValue.length; - System.arraycopy(groupValue.bytes, groupValue.offset, reuse.bytes, 0, groupValue.length); - return reuse; - } else { - return BytesRef.deepCopyOf(groupValue); - } - } - - @Override - protected void doSetNextReader(LeafReaderContext readerContext) throws IOException { - super.doSetNextReader(readerContext); - index = DocValues.getSorted(readerContext.reader(), groupField); - } - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGrouper.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGrouper.java deleted file mode 100644 index 246ee7853aa1..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGrouper.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.lucene.search.grouping.term; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.grouping.AllGroupHeadsCollector; -import org.apache.lucene.search.grouping.AllGroupsCollector; -import org.apache.lucene.search.grouping.FirstPassGroupingCollector; -import org.apache.lucene.search.grouping.SecondPassGroupingCollector; -import org.apache.lucene.search.grouping.Grouper; -import org.apache.lucene.search.grouping.SearchGroup; -import org.apache.lucene.util.BytesRef; - -/** - * Collector factory for grouping by term - */ -public class TermGrouper extends Grouper { - - private final String field; - private final int initialSize; - - /** - * Create a new TermGrouper - * @param field the field to group on - */ - public TermGrouper(String field) { - this(field, 128); - } - - /** - * Create a new TermGrouper - * @param field the field to group on - * @param initialSize the initial size of various internal datastructures - */ - public TermGrouper(String field, int initialSize) { - this.field = field; - this.initialSize = initialSize; - } - - @Override - public FirstPassGroupingCollector getFirstPassCollector(Sort sort, int count) throws IOException { - return new TermFirstPassGroupingCollector(field, sort, count); - } - - @Override - public AllGroupHeadsCollector getGroupHeadsCollector(Sort sort) { - return TermAllGroupHeadsCollector.create(field, sort, initialSize); - } - - @Override - public AllGroupsCollector getAllGroupsCollector() { - return new TermAllGroupsCollector(field, initialSize); - } - - @Override - public SecondPassGroupingCollector getSecondPassCollector( - Collection> groups, Sort groupSort, Sort withinGroupSort, - int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException { - return new TermSecondPassGroupingCollector(field, groups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); - } - - -} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java deleted file mode 100644 index 75d221024ba4..000000000000 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.lucene.search.grouping.term; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.grouping.SecondPassGroupingCollector; -import org.apache.lucene.search.grouping.SearchGroup; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.SentinelIntSet; - -/** - * Concrete implementation of {@link SecondPassGroupingCollector} that groups based on - * field values and more specifically uses {@link SortedDocValues} - * to collect grouped docs. - * - * @lucene.experimental - */ -public class TermSecondPassGroupingCollector extends SecondPassGroupingCollector { - - private final String groupField; - private final SentinelIntSet ordSet; - - private SortedDocValues index; - - @SuppressWarnings({"unchecked", "rawtypes"}) - public TermSecondPassGroupingCollector(String groupField, Collection> groups, Sort groupSort, Sort withinGroupSort, - int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) - throws IOException { - super(groups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); - this.groupField = groupField; - this.ordSet = new SentinelIntSet(groupMap.size(), -2); - super.groupDocs = (SearchGroupDocs[]) new SearchGroupDocs[ordSet.keys.length]; - } - - @Override - protected void doSetNextReader(LeafReaderContext readerContext) throws IOException { - super.doSetNextReader(readerContext); - index = DocValues.getSorted(readerContext.reader(), groupField); - - // Rebuild ordSet - ordSet.clear(); - for (SearchGroupDocs group : groupMap.values()) { -// System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString())); - int ord = group.groupValue == null ? 
-1 : index.lookupTerm(group.groupValue); - if (group.groupValue == null || ord >= 0) { - groupDocs[ordSet.put(ord)] = group; - } - } - } - - @Override - protected SearchGroupDocs retrieveGroup(int doc) throws IOException { - if (doc > index.docID()) { - index.advance(doc); - } - - int ord; - if (doc == index.docID()) { - ord = index.ordValue(); - } else { - ord = -1; - } - - int slot = ordSet.find(ord); - if (slot >= 0) { - return groupDocs[slot]; - } - return null; - } - -} diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java index 0c994565fa73..5ab4b5fed36b 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java @@ -49,8 +49,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; @@ -513,19 +511,12 @@ public int compare(GroupDoc d1, GroupDoc d2) { @SuppressWarnings({"unchecked","rawtypes"}) private AllGroupHeadsCollector createRandomCollector(String groupField, Sort sortWithinGroup) { - AllGroupHeadsCollector collector; if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); - collector = new FunctionAllGroupHeadsCollector(vs, new HashMap<>(), sortWithinGroup); + return AllGroupHeadsCollector.newCollector(new ValueSourceGroupSelector(vs, new HashMap<>()), sortWithinGroup); } else { - collector = TermAllGroupHeadsCollector.create(groupField, sortWithinGroup); + return AllGroupHeadsCollector.newCollector(new TermGroupSelector(groupField), sortWithinGroup); } - - if (VERBOSE) { - System.out.println("Selected implementation: " + collector.getClass().getSimpleName()); - } - - return collector; } private void addGroupField(Document doc, String groupField, String value, DocValuesType valueType) { diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java index ab70fad2339f..0d777f6e7fb6 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java @@ -16,6 +16,8 @@ */ package org.apache.lucene.search.grouping; +import java.util.HashMap; + import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -28,14 +30,10 @@ import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupsCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; -import java.util.HashMap; - public class AllGroupsCollectorTest extends LuceneTestCase { public void testTotalGroupCount() throws Exception { @@ -124,19 +122,13 @@ 
private void addGroupField(Document doc, String groupField, String value) { } private AllGroupsCollector createRandomCollector(String groupField) { - AllGroupsCollector selected; if (random().nextBoolean()) { - selected = new TermAllGroupsCollector(groupField); - } else { - ValueSource vs = new BytesRefFieldSource(groupField); - selected = new FunctionAllGroupsCollector(vs, new HashMap<>()); + return new AllGroupsCollector<>(new TermGroupSelector(groupField)); } - - if (VERBOSE) { - System.out.println("Selected implementation: " + selected.getClass().getName()); + else { + ValueSource vs = new BytesRefFieldSource(groupField); + return new AllGroupsCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>())); } - - return selected; } } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java index b5d67cf90585..89d9a6e4a46b 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java @@ -44,17 +44,12 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.grouping.function.FunctionDistinctValuesCollector; -import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector; -import org.apache.lucene.search.grouping.term.TermDistinctValuesCollector; -import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.mutable.MutableValue; import org.apache.lucene.util.mutable.MutableValueStr; - public class DistinctValuesCollectorTest extends AbstractGroupingTestCase { private final static NullComparator nullComparator = new NullComparator(); @@ -126,32 +121,27 @@ public void testSimple() throws Exception { IndexSearcher indexSearcher = newSearcher(w.getReader()); w.close(); - Comparator>> cmp = new Comparator>>() { - - @Override - public int compare(DistinctValuesCollector.GroupCount> groupCount1, DistinctValuesCollector.GroupCount> groupCount2) { - if (groupCount1.groupValue == null) { - if (groupCount2.groupValue == null) { - return 0; - } - return -1; - } else if (groupCount2.groupValue == null) { - return 1; - } else { - return groupCount1.groupValue.compareTo(groupCount2.groupValue); + Comparator, Comparable>> cmp = (groupCount1, groupCount2) -> { + if (groupCount1.groupValue == null) { + if (groupCount2.groupValue == null) { + return 0; } + return -1; + } else if (groupCount2.groupValue == null) { + return 1; + } else { + return groupCount1.groupValue.compareTo(groupCount2.groupValue); } - }; // === Search for content:random FirstPassGroupingCollector> firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10); indexSearcher.search(new TermQuery(new Term("content", "random")), firstCollector); - DistinctValuesCollector> distinctValuesCollector - = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD); + DistinctValuesCollector, Comparable> distinctValuesCollector + = createDistinctCountCollector(firstCollector, COUNT_FIELD); indexSearcher.search(new TermQuery(new Term("content", "random")), distinctValuesCollector); - List>> gcs = distinctValuesCollector.getGroups(); + List, Comparable>> gcs 
= distinctValuesCollector.getGroups(); Collections.sort(gcs, cmp); assertEquals(4, gcs.size()); @@ -180,7 +170,7 @@ public int compare(DistinctValuesCollector.GroupCount> groupC // === Search for content:some firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10); indexSearcher.search(new TermQuery(new Term("content", "some")), firstCollector); - distinctValuesCollector = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD); + distinctValuesCollector = createDistinctCountCollector(firstCollector, COUNT_FIELD); indexSearcher.search(new TermQuery(new Term("content", "some")), distinctValuesCollector); gcs = distinctValuesCollector.getGroups(); @@ -207,7 +197,7 @@ public int compare(DistinctValuesCollector.GroupCount> groupC // === Search for content:blob firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10); indexSearcher.search(new TermQuery(new Term("content", "blob")), firstCollector); - distinctValuesCollector = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD); + distinctValuesCollector = createDistinctCountCollector(firstCollector, COUNT_FIELD); indexSearcher.search(new TermQuery(new Term("content", "blob")), distinctValuesCollector); gcs = distinctValuesCollector.getGroups(); @@ -240,15 +230,15 @@ public void testRandom() throws Exception { Sort groupSort = new Sort(new SortField("id", SortField.Type.STRING)); int topN = 1 + random.nextInt(10); - List>> expectedResult = createExpectedResult(context, term, groupSort, topN); + List, Comparable>> expectedResult = createExpectedResult(context, term, groupSort, topN); - FirstPassGroupingCollector> firstCollector = createRandomFirstPassCollector(groupSort, GROUP_FIELD, topN); + FirstPassGroupingCollector> firstCollector = createRandomFirstPassCollector(groupSort, GROUP_FIELD, topN); searcher.search(new TermQuery(new Term("content", term)), firstCollector); - DistinctValuesCollector> distinctValuesCollector - = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD); + DistinctValuesCollector, Comparable> distinctValuesCollector + = createDistinctCountCollector(firstCollector, COUNT_FIELD); searcher.search(new TermQuery(new Term("content", term)), distinctValuesCollector); @SuppressWarnings("unchecked") - List>> actualResult = distinctValuesCollector.getGroups(); + List, Comparable>> actualResult = distinctValuesCollector.getGroups(); if (VERBOSE) { System.out.println("Index iter=" + indexIter); @@ -265,8 +255,8 @@ public void testRandom() throws Exception { assertEquals(expectedResult.size(), actualResult.size()); for (int i = 0; i < expectedResult.size(); i++) { - DistinctValuesCollector.GroupCount> expected = expectedResult.get(i); - DistinctValuesCollector.GroupCount> actual = actualResult.get(i); + DistinctValuesCollector.GroupCount, Comparable> expected = expectedResult.get(i); + DistinctValuesCollector.GroupCount, Comparable> actual = actualResult.get(i); assertValues(expected.groupValue, actual.groupValue); assertEquals(expected.uniqueValues.size(), actual.uniqueValues.size()); List> expectedUniqueValues = new ArrayList<>(expected.uniqueValues); @@ -283,9 +273,9 @@ public void testRandom() throws Exception { } } - private void printGroups(List>> results) { + private void printGroups(List, Comparable>> results) { for(int i=0;i> group = results.get(i); + DistinctValuesCollector.GroupCount, Comparable> group = results.get(i); Object gv = group.groupValue; if (gv instanceof BytesRef) { System.out.println(i + ": groupValue=" + ((BytesRef) 
gv).utf8ToString()); @@ -350,15 +340,16 @@ private void addField(Document doc, String field, String value) { } @SuppressWarnings({"unchecked","rawtypes"}) - private DistinctValuesCollector createDistinctCountCollector(FirstPassGroupingCollector firstPassGroupingCollector, - String groupField, - String countField) throws IOException { - Random random = random(); + private , R extends Comparable> DistinctValuesCollector createDistinctCountCollector(FirstPassGroupingCollector firstPassGroupingCollector, + String countField) throws IOException { Collection> searchGroups = firstPassGroupingCollector.getTopGroups(0, false); - if (FunctionFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - return (DistinctValuesCollector) new FunctionDistinctValuesCollector(new HashMap<>(), new BytesRefFieldSource(groupField), new BytesRefFieldSource(countField), (Collection) searchGroups); + GroupSelector selector = firstPassGroupingCollector.getGroupSelector(); + if (ValueSourceGroupSelector.class.isAssignableFrom(selector.getClass())) { + GroupSelector gs = new ValueSourceGroupSelector(new BytesRefFieldSource(countField), new HashMap<>()); + return new DistinctValuesCollector<>(selector, searchGroups, gs); } else { - return (DistinctValuesCollector) new TermDistinctValuesCollector(groupField, countField, (Collection) searchGroups); + GroupSelector ts = new TermGroupSelector(countField); + return new DistinctValuesCollector<>(selector, searchGroups, ts); } } @@ -366,21 +357,14 @@ private DistinctValuesCollector createDistinctCountCol private FirstPassGroupingCollector createRandomFirstPassCollector(Sort groupSort, String groupField, int topNGroups) throws IOException { Random random = random(); if (random.nextBoolean()) { - return (FirstPassGroupingCollector) new FunctionFirstPassGroupingCollector(new BytesRefFieldSource(groupField), new HashMap<>(), groupSort, topNGroups); + return (FirstPassGroupingCollector) new FirstPassGroupingCollector<>(new ValueSourceGroupSelector(new BytesRefFieldSource(groupField), new HashMap<>()), groupSort, topNGroups); } else { - return (FirstPassGroupingCollector) new TermFirstPassGroupingCollector(groupField, groupSort, topNGroups); + return (FirstPassGroupingCollector) new FirstPassGroupingCollector<>(new TermGroupSelector(groupField), groupSort, topNGroups); } } @SuppressWarnings({"unchecked","rawtypes"}) - private List>> createExpectedResult(IndexContext context, String term, Sort groupSort, int topN) { - class GroupCount extends DistinctValuesCollector.GroupCount { - GroupCount(BytesRef groupValue, Collection uniqueValues) { - super(groupValue); - this.uniqueValues.addAll(uniqueValues); - } - } - + private List, Comparable>> createExpectedResult(IndexContext context, String term, Sort groupSort, int topN) { List result = new ArrayList(); Map> groupCounts = context.searchTermToGroupCounts.get(term); int i = 0; @@ -392,7 +376,7 @@ class GroupCount extends DistinctValuesCollector.GroupCount { for (String val : groupCounts.get(group)) { uniqueValues.add(val != null ? new BytesRef(val) : null); } - result.add(new GroupCount(group != null ? new BytesRef(group) : null, uniqueValues)); + result.add(new DistinctValuesCollector.GroupCount(group != null ? 
new BytesRef(group) : null, uniqueValues)); } return result; } diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java index c590502f9297..968ce5ab1a11 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java @@ -44,7 +44,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.grouping.term.TermGroupFacetCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index f079b854c869..b322fbae6769 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -58,12 +58,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.Weight; -import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector; -import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector; -import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupsCollector; -import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector; -import org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; @@ -147,7 +141,7 @@ public void testBasic() throws Exception { final FirstPassGroupingCollector c1 = createRandomFirstPassCollector(groupField, groupSort, 10); indexSearcher.search(new TermQuery(new Term("content", "random")), c1); - final SecondPassGroupingCollector c2 = createSecondPassCollector(c1, groupField, groupSort, Sort.RELEVANCE, 0, 5, true, true, true); + final TopGroupsCollector c2 = createSecondPassCollector(c1, groupSort, Sort.RELEVANCE, 0, 5, true, true, true); indexSearcher.search(new TermQuery(new Term("content", "random")), c2); final TopGroups groups = c2.getTopGroups(0); @@ -196,31 +190,26 @@ private void addGroupField(Document doc, String groupField, String value) { } private FirstPassGroupingCollector createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs) throws IOException { - FirstPassGroupingCollector selected; if (random().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); - selected = new FunctionFirstPassGroupingCollector(vs, new HashMap<>(), groupSort, topDocs); + return new FirstPassGroupingCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>()), groupSort, topDocs); } else { - selected = new TermFirstPassGroupingCollector(groupField, groupSort, topDocs); + return new FirstPassGroupingCollector<>(new TermGroupSelector(groupField), groupSort, topDocs); } - if (VERBOSE) { - System.out.println("Selected implementation: " + selected.getClass().getName()); - } - return selected; } private FirstPassGroupingCollector createFirstPassCollector(String groupField, Sort groupSort, int topDocs, 
FirstPassGroupingCollector firstPassGroupingCollector) throws IOException { - if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { + GroupSelector selector = firstPassGroupingCollector.getGroupSelector(); + if (TermGroupSelector.class.isAssignableFrom(selector.getClass())) { ValueSource vs = new BytesRefFieldSource(groupField); - return new FunctionFirstPassGroupingCollector(vs, new HashMap<>(), groupSort, topDocs); + return new FirstPassGroupingCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>()), groupSort, topDocs); } else { - return new TermFirstPassGroupingCollector(groupField, groupSort, topDocs); + return new FirstPassGroupingCollector<>(new TermGroupSelector(groupField), groupSort, topDocs); } } @SuppressWarnings({"unchecked","rawtypes"}) - private SecondPassGroupingCollector createSecondPassCollector(FirstPassGroupingCollector firstPassGroupingCollector, - String groupField, + private TopGroupsCollector createSecondPassCollector(FirstPassGroupingCollector firstPassGroupingCollector, Sort groupSort, Sort sortWithinGroup, int groupOffset, @@ -229,19 +218,13 @@ private SecondPassGroupingCollector createSecondPassCollector(FirstPassGr boolean getMaxScores, boolean fillSortFields) throws IOException { - if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - Collection> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields); - return (SecondPassGroupingCollector) new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields); - } else { - ValueSource vs = new BytesRefFieldSource(groupField); - Collection> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields); - return (SecondPassGroupingCollector) new FunctionSecondPassGroupingCollector(searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields, vs, new HashMap()); - } + Collection> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields); + return new TopGroupsCollector<>(firstPassGroupingCollector.getGroupSelector(), searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); } // Basically converts searchGroups from MutableValue to BytesRef if grouping by ValueSource @SuppressWarnings("unchecked") - private SecondPassGroupingCollector createSecondPassCollector(FirstPassGroupingCollector firstPassGroupingCollector, + private TopGroupsCollector createSecondPassCollector(FirstPassGroupingCollector firstPassGroupingCollector, String groupField, Collection> searchGroups, Sort groupSort, @@ -250,8 +233,9 @@ private SecondPassGroupingCollector createSecondPassCollector(FirstPassGroupi boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException { - if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) { - return new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields); + if (firstPassGroupingCollector.getGroupSelector().getClass().isAssignableFrom(TermGroupSelector.class)) { + GroupSelector selector = (GroupSelector) firstPassGroupingCollector.getGroupSelector(); + return new TopGroupsCollector<>(selector, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields); } else { ValueSource vs = new 
BytesRefFieldSource(groupField); List> mvalSearchGroups = new ArrayList<>(searchGroups.size()); @@ -267,19 +251,14 @@ private SecondPassGroupingCollector createSecondPassCollector(FirstPassGroupi sg.sortValues = mergedTopGroup.sortValues; mvalSearchGroups.add(sg); } - - return new FunctionSecondPassGroupingCollector(mvalSearchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields, vs, new HashMap<>()); + ValueSourceGroupSelector selector = new ValueSourceGroupSelector(vs, new HashMap<>()); + return new TopGroupsCollector<>(selector, mvalSearchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); } } private AllGroupsCollector createAllGroupsCollector(FirstPassGroupingCollector firstPassGroupingCollector, String groupField) { - if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) { - return new TermAllGroupsCollector(groupField); - } else { - ValueSource vs = new BytesRefFieldSource(groupField); - return new FunctionAllGroupsCollector(vs, new HashMap<>()); - } + return new AllGroupsCollector<>(firstPassGroupingCollector.getGroupSelector()); } private void compareGroupValue(String expected, GroupDocs group) { @@ -306,10 +285,12 @@ private void compareGroupValue(String expected, GroupDocs group) { } private Collection> getSearchGroups(FirstPassGroupingCollector c, int groupOffset, boolean fillFields) throws IOException { - if (TermFirstPassGroupingCollector.class.isAssignableFrom(c.getClass())) { - return ((TermFirstPassGroupingCollector) c).getTopGroups(groupOffset, fillFields); - } else if (FunctionFirstPassGroupingCollector.class.isAssignableFrom(c.getClass())) { - Collection> mutableValueGroups = ((FunctionFirstPassGroupingCollector) c).getTopGroups(groupOffset, fillFields); + if (TermGroupSelector.class.isAssignableFrom(c.getGroupSelector().getClass())) { + FirstPassGroupingCollector collector = (FirstPassGroupingCollector) c; + return collector.getTopGroups(groupOffset, fillFields); + } else if (ValueSourceGroupSelector.class.isAssignableFrom(c.getGroupSelector().getClass())) { + FirstPassGroupingCollector collector = (FirstPassGroupingCollector) c; + Collection> mutableValueGroups = collector.getTopGroups(groupOffset, fillFields); if (mutableValueGroups == null) { return null; } @@ -328,11 +309,13 @@ private Collection> getSearchGroups(FirstPassGroupingColle } @SuppressWarnings({"unchecked", "rawtypes"}) - private TopGroups getTopGroups(SecondPassGroupingCollector c, int withinGroupOffset) { - if (c.getClass().isAssignableFrom(TermSecondPassGroupingCollector.class)) { - return ((TermSecondPassGroupingCollector) c).getTopGroups(withinGroupOffset); - } else if (c.getClass().isAssignableFrom(FunctionSecondPassGroupingCollector.class)) { - TopGroups mvalTopGroups = ((FunctionSecondPassGroupingCollector) c).getTopGroups(withinGroupOffset); + private TopGroups getTopGroups(TopGroupsCollector c, int withinGroupOffset) { + if (c.getGroupSelector().getClass().isAssignableFrom(TermGroupSelector.class)) { + TopGroupsCollector collector = (TopGroupsCollector) c; + return collector.getTopGroups(withinGroupOffset); + } else if (c.getGroupSelector().getClass().isAssignableFrom(ValueSourceGroupSelector.class)) { + TopGroupsCollector collector = (TopGroupsCollector) c; + TopGroups mvalTopGroups = collector.getTopGroups(withinGroupOffset); List> groups = new ArrayList<>(mvalTopGroups.groups.length); for (GroupDocs mvalGd : mvalTopGroups.groups) { BytesRef groupValue = 
mvalGd.groupValue.exists() ? ((MutableValueStr) mvalGd.groupValue).value.get() : null; @@ -952,8 +935,8 @@ public void testRandom() throws Exception { // Get 1st pass top groups using shards final TopGroups topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort, - groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, true, false); - final SecondPassGroupingCollector c2; + groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, true, true); + final TopGroupsCollector c2; if (topGroups != null) { if (VERBOSE) { @@ -963,7 +946,7 @@ public void testRandom() throws Exception { } } - c2 = createSecondPassCollector(c1, groupField, groupSort, docSort, groupOffset, docOffset + docsPerGroup, getScores, getMaxScores, fillFields); + c2 = createSecondPassCollector(c1, groupSort, docSort, groupOffset, docOffset + docsPerGroup, getScores, getMaxScores, fillFields); if (doCache) { if (cCache.isCached()) { if (VERBOSE) { @@ -1050,13 +1033,13 @@ public void testRandom() throws Exception { final boolean needsScores = getScores || getMaxScores || docSort == null; final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, sBlocks.createNormalizedWeight(lastDocInBlock, false)); - final TermAllGroupsCollector allGroupsCollector2; + final AllGroupsCollector allGroupsCollector2; final Collector c4; if (doAllGroups) { // NOTE: must be "group" and not "group_dv" // (groupField) because we didn't index doc // values in the block index: - allGroupsCollector2 = new TermAllGroupsCollector("group"); + allGroupsCollector2 = new AllGroupsCollector<>(new TermGroupSelector("group")); c4 = MultiCollector.wrap(c3, allGroupsCollector2); } else { allGroupsCollector2 = null; @@ -1223,7 +1206,7 @@ private TopGroups searchShards(IndexSearcher topSearcher, ShardSearche @SuppressWarnings({"unchecked","rawtypes"}) final TopGroups[] shardTopGroups = new TopGroups[subSearchers.length]; for(int shardIDX=0;shardIDX secondPassCollector = createSecondPassCollector(firstPassGroupingCollectors.get(shardIDX), + final TopGroupsCollector secondPassCollector = createSecondPassCollector(firstPassGroupingCollectors.get(shardIDX), groupField, mergedTopGroups, groupSort, docSort, docOffset + topNDocs, getScores, getMaxScores, true); subSearchers[shardIDX].search(w, secondPassCollector); shardTopGroups[shardIDX] = getTopGroups(secondPassCollector, 0); diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/MultiTermHighlighting.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/MultiTermHighlighting.java index 89403d5628ef..15f4bddf7e5a 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/MultiTermHighlighting.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/MultiTermHighlighting.java @@ -28,12 +28,14 @@ import org.apache.lucene.search.AutomatonQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanNearQuery; import 
org.apache.lucene.search.spans.SpanNotQuery; @@ -64,6 +66,8 @@ public static CharacterRunAutomaton[] extractAutomata(Query query, Predicate fieldMatcher, boolean lookInSpan, Function> preRewriteFunc) { + // TODO Lucene needs a Query visitor API! LUCENE-3041 + List list = new ArrayList<>(); Collection customSubQueries = preRewriteFunc.apply(query); if (customSubQueries != null) { @@ -79,6 +83,9 @@ public static CharacterRunAutomaton[] extractAutomata(Query query, } else if (query instanceof ConstantScoreQuery) { list.addAll(Arrays.asList(extractAutomata(((ConstantScoreQuery) query).getQuery(), fieldMatcher, lookInSpan, preRewriteFunc))); + } else if (query instanceof BoostQuery) { + list.addAll(Arrays.asList(extractAutomata(((BoostQuery)query).getQuery(), fieldMatcher, lookInSpan, + preRewriteFunc))); } else if (query instanceof DisjunctionMaxQuery) { for (Query sub : ((DisjunctionMaxQuery) query).getDisjuncts()) { list.addAll(Arrays.asList(extractAutomata(sub, fieldMatcher, lookInSpan, preRewriteFunc))); @@ -97,6 +104,9 @@ public static CharacterRunAutomaton[] extractAutomata(Query query, } else if (lookInSpan && query instanceof SpanPositionCheckQuery) { list.addAll(Arrays.asList(extractAutomata(((SpanPositionCheckQuery) query).getMatch(), fieldMatcher, lookInSpan, preRewriteFunc))); + } else if (lookInSpan && query instanceof SpanBoostQuery) { + list.addAll(Arrays.asList(extractAutomata(((SpanBoostQuery) query).getQuery(), fieldMatcher, lookInSpan, + preRewriteFunc))); } else if (lookInSpan && query instanceof SpanMultiTermQueryWrapper) { list.addAll(Arrays.asList(extractAutomata(((SpanMultiTermQueryWrapper) query).getWrappedQuery(), fieldMatcher, lookInSpan, preRewriteFunc))); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java index 4a4b7ede1962..57f174f6da61 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java @@ -38,6 +38,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; @@ -52,6 +53,7 @@ import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanFirstQuery; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanNearQuery; @@ -163,7 +165,8 @@ public void testOnePrefix() throws Exception { IndexSearcher searcher = newSearcher(ir); UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer); - Query query = new PrefixQuery(new Term("body", "te")); + // wrap in a BoostQuery to also show we see inside it + Query query = new BoostQuery(new PrefixQuery(new Term("body", "te")), 2.0f); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits); String snippets[] = highlighter.highlight("body", query, topDocs); @@ -522,7 +525,9 @@ public void testSpanWildcard() throws Exception { IndexSearcher searcher = newSearcher(ir); 
UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer); - Query query = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))); + // wrap in a SpanBoostQuery to also show we see inside it + Query query = new SpanBoostQuery( + new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))), 2.0f); TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits); String snippets[] = highlighter.highlight("body", query, topDocs); diff --git a/lucene/ivy-versions.properties b/lucene/ivy-versions.properties index 6bd3f8d7b8b5..60abe9a56229 100644 --- a/lucene/ivy-versions.properties +++ b/lucene/ivy-versions.properties @@ -55,7 +55,7 @@ com.sun.jersey.version = 1.9 /hsqldb/hsqldb = 1.8.0.10 /info.ganglia.gmetric4j/gmetric4j = 1.0.7 -io.dropwizard.metrics.version = 3.1.2 +io.dropwizard.metrics.version = 3.2.2 /io.dropwizard.metrics/metrics-core = ${io.dropwizard.metrics.version} /io.dropwizard.metrics/metrics-ganglia = ${io.dropwizard.metrics.version} /io.dropwizard.metrics/metrics-graphite = ${io.dropwizard.metrics.version} @@ -265,5 +265,9 @@ org.slf4j.version = 1.7.7 /org.tukaani/xz = 1.5 /rome/rome = 1.0 + +ua.net.nlp.morfologik-ukrainian-search.version = 3.7.5 +/ua.net.nlp/morfologik-ukrainian-search = ${ua.net.nlp.morfologik-ukrainian-search.version} + /xerces/xercesImpl = 2.9.1 diff --git a/lucene/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 b/lucene/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 new file mode 100644 index 000000000000..8794e71fbe9b --- /dev/null +++ b/lucene/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 @@ -0,0 +1 @@ +2b8c8fbd740164d220ca7d18605b8b2092e163e9 diff --git a/lucene/licenses/morfologik-ukrainian-search-LICENSE-ASL.txt b/lucene/licenses/morfologik-ukrainian-search-LICENSE-ASL.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/lucene/licenses/morfologik-ukrainian-search-LICENSE-ASL.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lucene/licenses/morfologik-ukrainian-search-NOTICE.txt b/lucene/licenses/morfologik-ukrainian-search-NOTICE.txt new file mode 100644 index 000000000000..df3fa1d558be --- /dev/null +++ b/lucene/licenses/morfologik-ukrainian-search-NOTICE.txt @@ -0,0 +1,6 @@ +morfologik-ukrainian-search is a POS tag dictionary in morfologik format adjusted for searching. +It's part of dict_uk project (https://github.com/brown-uk/dict_uk) + +Note: to better fit into full-text search model this dictionary has all word forms in lower case but keeps lemmas for proper nouns in upper case. + +Licensed under Apache License 2.0. 
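To show what the MultiTermHighlighting and TestUnifiedHighlighterMTQ changes above buy us, here is a minimal, self-contained sketch (not part of this patch) of highlighting a BoostQuery-wrapped PrefixQuery with the UnifiedHighlighter. The index contents, field name and analyzer are invented for illustration; before extractAutomata learned to look inside BoostQuery and SpanBoostQuery, the wrapped prefix query produced no highlights.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.uhighlight.UnifiedHighlighter;
import org.apache.lucene.store.RAMDirectory;

public class BoostedPrefixHighlightSketch {
  public static void main(String[] args) throws Exception {
    StandardAnalyzer analyzer = new StandardAnalyzer();
    RAMDirectory dir = new RAMDirectory();
    try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
      Document doc = new Document();
      // invented sample text; the field must be stored so the highlighter can load it
      doc.add(new TextField("body", "This is a test. Just highlighting a simple test sentence.", Field.Store.YES));
      w.addDocument(doc);
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, analyzer);
      // the boost wrapper previously hid the prefix query from extractAutomata, so nothing was highlighted
      Query query = new BoostQuery(new PrefixQuery(new Term("body", "te")), 2.0f);
      TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
      String[] snippets = highlighter.highlight("body", query, topDocs);
      for (String snippet : snippets) {
        System.out.println(snippet); // "te*" matches now come back wrapped by the default passage formatter
      }
    }
  }
}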
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java index 4bd72e993d69..a1f2b0789ee0 100644 --- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java +++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java @@ -1217,7 +1217,7 @@ public SortedSetDocValues getSortedSetDocValues(String field) { @Override public PointValues getPointValues(String fieldName) { Info info = fields.get(fieldName); - if (info.pointValues == null) { + if (info == null || info.pointValues == null) { return null; } return new MemoryIndexPointValues(info); @@ -1529,6 +1529,7 @@ private class MemoryIndexPointValues extends PointValues { MemoryIndexPointValues(Info info) { this.info = Objects.requireNonNull(info); + Objects.requireNonNull(info.pointValues, "Field does not have points"); } @Override @@ -1548,12 +1549,7 @@ public long estimatePointCount(IntersectVisitor visitor) { @Override public byte[] getMinPackedValue() throws IOException { - BytesRef[] values = info.pointValues; - if (values != null) { - return info.minPackedValue; - } else { - return null; - } + return info.minPackedValue; } @Override diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java index f34f30c295a5..1e20f30509b6 100644 --- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java +++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java @@ -40,6 +40,7 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StoredField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.BinaryDocValues; @@ -422,6 +423,17 @@ public void testPointValues() throws Exception { } } + public void testMissingPoints() throws IOException { + Document doc = new Document(); + doc.add(new StoredField("field", 42)); + MemoryIndex mi = MemoryIndex.fromDocument(doc, analyzer); + IndexSearcher indexSearcher = mi.createSearcher(); + // field that exists but does not have points + assertNull(indexSearcher.getIndexReader().leaves().get(0).reader().getPointValues("field")); + // field that does not exist + assertNull(indexSearcher.getIndexReader().leaves().get(0).reader().getPointValues("some_missing_field")); + } + public void testPointValuesDoNotAffectPositionsOrOffset() throws Exception { MemoryIndex mi = new MemoryIndex(true, true); mi.addField(new TextField("text", "quick brown fox", Field.Store.NO), analyzer); diff --git a/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java b/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java index f07793ace6de..19427347fa7f 100644 --- a/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java +++ b/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java @@ -154,7 +154,7 @@ public long cost() { } @Override - public long longValue() { + public long longValue() throws IOException { // Keys are always expressed as a long so we obtain the // ordinal for our String-based artist name here return sdv.ordValue(); diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java 
b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java index c3c98f7627e2..6c5253e0d34a 100644 --- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java +++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/HeatmapFacetCounter.java @@ -142,6 +142,9 @@ public static Heatmap calcFacets(PrefixTreeStrategy strategy, IndexReaderContext } final Heatmap heatmap = new Heatmap(columns, rows, ctx.makeRectangle(heatMinX, heatMaxX, heatMinY, heatMaxY)); + if (topAcceptDocs instanceof Bits.MatchNoBits) { + return heatmap; // short-circuit + } //All ancestor cell counts (of facetLevel) will be captured during facet visiting and applied later. If the data is // just points then there won't be any ancestors. diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java index b03fa3d152d2..a58765348ca5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java @@ -630,7 +630,7 @@ public long cost() { } @Override - public int ordValue() { + public int ordValue() throws IOException { assertThread("Sorted doc values", creationThread); assert exists; int ord = in.ordValue(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java index 45aabfe2a4b4..a412ed8f7c84 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/OwnCacheKeyMultiReader.java @@ -40,7 +40,8 @@ public CacheKey getKey() { @Override public void addClosedListener(ClosedListener listener) { - readerClosedListeners.add(listener); + ensureOpen(); + readerClosedListeners.add(listener); } }; diff --git a/lucene/tools/javadoc/ecj.javadocs.prefs b/lucene/tools/javadoc/ecj.javadocs.prefs index 61971ec6d1b2..5d96e88b5f40 100644 --- a/lucene/tools/javadoc/ecj.javadocs.prefs +++ b/lucene/tools/javadoc/ecj.javadocs.prefs @@ -8,7 +8,7 @@ org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 org.eclipse.jdt.core.compiler.compliance=1.8 org.eclipse.jdt.core.compiler.doc.comment.support=enabled -org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=ignore +org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=error org.eclipse.jdt.core.compiler.problem.assertIdentifier=error org.eclipse.jdt.core.compiler.problem.autoboxing=ignore org.eclipse.jdt.core.compiler.problem.comparingIdentical=error @@ -16,18 +16,18 @@ org.eclipse.jdt.core.compiler.problem.deadCode=ignore org.eclipse.jdt.core.compiler.problem.deprecation=ignore org.eclipse.jdt.core.compiler.problem.deprecationInDeprecatedCode=disabled org.eclipse.jdt.core.compiler.problem.deprecationWhenOverridingDeprecatedMethod=disabled -org.eclipse.jdt.core.compiler.problem.discouragedReference=ignore +org.eclipse.jdt.core.compiler.problem.discouragedReference=error org.eclipse.jdt.core.compiler.problem.emptyStatement=ignore org.eclipse.jdt.core.compiler.problem.enumIdentifier=error org.eclipse.jdt.core.compiler.problem.fallthroughCase=ignore org.eclipse.jdt.core.compiler.problem.fatalOptionalError=disabled org.eclipse.jdt.core.compiler.problem.fieldHiding=ignore 
-org.eclipse.jdt.core.compiler.problem.finalParameterBound=ignore -org.eclipse.jdt.core.compiler.problem.finallyBlockNotCompletingNormally=ignore -org.eclipse.jdt.core.compiler.problem.forbiddenReference=ignore -org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=ignore +org.eclipse.jdt.core.compiler.problem.finalParameterBound=error +org.eclipse.jdt.core.compiler.problem.finallyBlockNotCompletingNormally=error +org.eclipse.jdt.core.compiler.problem.forbiddenReference=error +org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=error org.eclipse.jdt.core.compiler.problem.includeNullInfoFromAsserts=disabled -org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=ignore +org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=error org.eclipse.jdt.core.compiler.problem.incompleteEnumSwitch=ignore org.eclipse.jdt.core.compiler.problem.indirectStaticAccess=ignore org.eclipse.jdt.core.compiler.problem.invalidJavadoc=error @@ -36,7 +36,7 @@ org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsDeprecatedRef=disabled org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsNotVisibleRef=disabled org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsVisibility=private org.eclipse.jdt.core.compiler.problem.localVariableHiding=ignore -org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=ignore +org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=error org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=ignore org.eclipse.jdt.core.compiler.problem.missingHashCodeMethod=ignore org.eclipse.jdt.core.compiler.problem.missingJavadocComments=ignore @@ -52,10 +52,10 @@ org.eclipse.jdt.core.compiler.problem.missingOverrideAnnotationForInterfaceMetho org.eclipse.jdt.core.compiler.problem.missingSerialVersion=ignore org.eclipse.jdt.core.compiler.problem.missingSynchronizedOnInheritedMethod=ignore org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error -org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=ignore +org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral=ignore org.eclipse.jdt.core.compiler.problem.nullReference=ignore -org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=ignore +org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=error org.eclipse.jdt.core.compiler.problem.parameterAssignment=ignore org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=ignore org.eclipse.jdt.core.compiler.problem.potentialNullReference=ignore diff --git a/lucene/tools/junit4/cached-timehints.txt b/lucene/tools/junit4/cached-timehints.txt index f2b8974a600a..cb3da9964d1f 100644 --- a/lucene/tools/junit4/cached-timehints.txt +++ b/lucene/tools/junit4/cached-timehints.txt @@ -813,7 +813,7 @@ org.apache.solr.EchoParamsTest=136,170,349,124,140,142,284 org.apache.solr.MinimalSchemaTest=304,316,467,304,297,755,309 org.apache.solr.OutputWriterTest=302,276,265,314,244,211,268 org.apache.solr.SampleTest=339,290,266,243,333,414,355 -org.apache.solr.SolrInfoMBeanTest=1090,1132,644,629,637,1023,735 +org.apache.solr.SolrInfoBeanTest=1090,1132,644,629,637,1023,735 org.apache.solr.TestDistributedGrouping=13095,9478,8420,9633,10692,9265,10893 org.apache.solr.TestDistributedSearch=11199,9886,16211,11367,11325,10717,10392 org.apache.solr.TestDocumentBuilder=10,10,9,13,10,9,10 diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 62e91899b772..62df77679e41 100644 --- a/solr/CHANGES.txt +++ 
b/solr/CHANGES.txt @@ -52,6 +52,24 @@ Upgrading from Solr 6.x * Deprecated method getNumericType() has been removed from FieldType. Use getNumberType() instead +* MBean names and attributes now follow hierarchical names used in metrics. This is reflected also in + /admin/mbeans and /admin/plugins output, and can be observed in the UI Plugins tab, because now all these + APIs get their data from the metrics API. The old (mostly flat) JMX view has been removed. + +* <jmx> element in solrconfig.xml is no longer supported. Equivalent functionality can be configured in + solr.xml using <metrics><reporter> element and SolrJmxReporter implementation. Limited back-compatibility + is offered by automatically adding a default instance of SolrJmxReporter if it's missing, AND when a local + MBean server is found (which can be activated either via ENABLE_REMOTE_JMX_OPTS in solr.in.sh or via system + properties, eg. -Dcom.sun.management.jmxremote). This default instance exports all Solr metrics from all + registries as hierarchical MBeans. This behavior can be also disabled by specifying a SolrJmxReporter + configuration with a boolean init arg "enabled" set to "false". For a more fine-grained control users + should explicitly specify at least one SolrJmxReporter configuration. + +* The sow (split-on-whitespace) request param now defaults to false (true in previous versions). + This affects the edismax and standard/"lucene" query parsers: if the sow param is not specified, + query text will not be split on whitespace before analysis. See + https://lucidworks.com/2017/04/18/multi-word-synonyms-solr-adds-query-time-support/ . + New Features ---------------------- * SOLR-9857, SOLR-9858: Collect aggregated metrics from nodes and shard leaders in overseer. (ab) @@ -65,6 +83,8 @@ New Features * SOLR-10393: Adds UUID Streaming Evaluator (Dennis Gove) +* SOLR-10046: Add UninvertDocValuesMergePolicyFactory class. (Keith Laban, Christine Poerschke) + Bug Fixes ---------------------- * SOLR-9262: Connection and read timeouts are being ignored by UpdateShardHandler after SOLR-4509. @@ -96,7 +116,18 @@ Other Changes * SOLR-10347: Removed index level boost support from "documents" section of the admin UI (Amrit Sarkar via Tomás Fernández Löbbe) ----------------------- +* SOLR-9959: SolrInfoMBean category and hierarchy cleanup. Per-component statistics are now obtained from + the metrics API, legacy JMX support has been replaced with SolrJmxReporter functionality. Several reporter + improvements (support for multiple prefix filters, "enabled" flag, reuse of service clients). (ab) + +* SOLR-10418: Expose safe system properties via metrics API as 'system.properties' in 'solr.jvm' group. + Add support for selecting specific properties from any compound metric using 'property' parameter to + /admin/metrics handler. (ab) + +* SOLR-10557: Make "compact" format default for /admin/metrics. (ab) + +* SOLR-10310: By default, stop splitting on whitespace prior to analysis + in edismax and standard/"lucene" query parsers. (Steve Rowe) ================== 6.6.0 ================== @@ -116,6 +147,10 @@ Upgrade Notes * Solr contribs map-reduce, morphlines-core and morphlines-cell have been removed. +* JSON Facet API now uses hyper-log-log for numBuckets cardinality calculation and + calculates cardinality before filtering buckets by any mincount greater than 1. + + Detailed Change List ---------------------- @@ -124,8 +159,6 @@ New Features * SOLR-9992: Add support for grouping with PointFields.
(Cao Manh Dat) -* SOLR-10046: Add UninvertDocValuesMergePolicyFactory class. (Keith Laban, Christine Poerschke) - * SOLR-9994: Add support for CollapseQParser with PointFields. (Varun Thacker, Cao Manh Dat) * SOLR-10076: Hide keystore and truststore passwords from /admin/info/* outputs. (Mano Kovacs via Mark Miller) @@ -139,6 +172,25 @@ New Features * SOLR-10239: MOVEREPLICA API (Cao Manh Dat, Noble Paul, shalin) +* SOLR-9936: Allow configuration for recoveryExecutor thread pool size. (Tim Owen via Mark Miller) + +* SOLR-10447: Collections API now supports a LISTALIASES command to return a list of all collection aliases. + (Yago Riveiro, Ishan Chattopadhyaya, Mark Miller, Steve Molloy, Shawn Heisey, Mike Drob, janhoy) + +* SOLR-10446: CloudSolrClient can now be initialized using the base URL of a Solr instance instead of + ZooKeeper hosts. This is possible through the use of newly introduced HttpClusterStateProvider. + To fetch a list of collection aliases, this depends on LISTALIASES command, and hence this way of + initializing CloudSolrClient would not work if you have collection aliases on older versions of Solr + server that doesn't support LISTALIASES. (Ishan Chattopadhyaya, Noble Paul) + +* SOLR-10082: Variance and Standard Deviation aggregators for the JSON Facet API. + Example: json.facet={x:"stddev(field1)", y:"variance(field2)"} + (Rustam Hashimov, yonik) + +* SOLR-10505: Add multi-field support to TermsComponent when requesting terms' statistics. (Shai Erera) + +* SOLR-10537: SolrJ: Added SolrParams.toLocalParamsString() and ClientUtils.encodeLocalParamVal. (David Smiley) + Optimizations ---------------------- @@ -146,6 +198,15 @@ Optimizations instance if it already is modifiable, otherwise creates a new ModifiableSolrParams instance. (Jörg Rathlev via Koji) +* SOLR-10499: facet.heatmap is now significantly faster when the docset (base query) matches everything and there are no + deleted docs. It's also faster when the docset matches a small fraction of the index or none. (David Smiley) + +* SOLR-9217: Reduced heap consumption for filter({!join ... score=...}) + (Andrey Kudryavtsev, Gopikannan Venugopalsamy via Mikhail Khludnev) + +* SOLR-10548: JSON Facet API now uses hyper-log-log++ for determining the number of buckets + when merging requests from a multi-shard distributed request. (yonik) + Bug Fixes ---------------------- * SOLR-10281: ADMIN_PATHS is duplicated in two places and inconsistent. This can cause automatic @@ -165,6 +226,34 @@ Bug Fixes * SOLR-10264: Fixes multi-term synonym parsing in ManagedSynonymFilterFactory. (Jörg Rathlev, Steve Rowe, Christine Poerschke) + +* SOLR-8807: fix Spellcheck "collateMaxCollectDocs" parameter to work with queries that have the + CollpasingQParserPlugin applied. (James Dyer) + +* SOLR-10474: TestPointFields.testPointFieldReturn() depends on order of unsorted hits. (Steve Rowe) + +* SOLR-10473: Correct LBHttpSolrClient's confusing SolrServerException message when timeAllowed is exceeded. + (Christine Poerschke) + +* SOLR-10047: Mismatched Docvalues segments cause exception in Sorting/Faceting. Solr now uninverts per segment + to avoid such exceptions. (Keith Laban via shalin) + +* SOLR-10472: Fixed uninversion (aka: FieldCache) bugs with the numeric PointField classes, and CurrencyField (hossman) + +* SOLR-5127: Multiple highlight fields and wildcards are now supported e.g. hl.fl=title,text_* + (Sven-S. Porst, Daniel Debray, Simon Endele, Christine Poerschke) + +* SOLR-10493: Investigate SolrCloudExampleTest failures. 
(Erick Erickson) + +* SOLR-10552: JSON Facet API numBuckets was not consistent between distributed and non-distributed requests + when there was a mincount > 1. This has been corrected by changing numBuckets cardinality processing to + ignore mincount > 1 for non-distributed requests. (yonik) + +* SOLR-10520: child.facet.field doubled counts at least when rows>0. (Dr. Oleg Savrasov via Mikhail Khludnev) + +* SOLR-10480: Full pagination in JSON Facet API using offset does not work. (yonik) + +* SOLR-10526: facet.heatmap didn't honor facet exclusions ('ex') for distributed search. (David Smiley) Other Changes ---------------------- @@ -207,6 +296,14 @@ Other Changes * SOLR-10440: LBHttpSolrClient.doRequest is now always wrapped in a Mapped Diagnostic Context (MDC). (Christine Poerschke) +* SOLR-10429: UpdateRequest#getRoutes()should copy the response parser (noble) + +* SOLR-10007: Clean up references to CoreContainer and CoreDescriptors (Erick Erickson) + +* SOLR-10151: Use monotonically incrementing counter for doc ids in TestRecovery. (Peter Szantai-Kis, Mano Kovacs via Mark Miller) + +* SOLR-10514: Upgrade Metrics library to 3.2.2. (ab) + ================== 6.5.1 ================== Bug Fixes @@ -237,6 +334,13 @@ Bug Fixes * SOLR-10444: SQL interface does not use client cache. (Joel Bernstein) +* SOLR-10420: Solr 6.x leaking one SolrZkClient instance per second (Scott Blum, Cao Manh Dat, Markus Jelsma, Steve Rowe) + +* SOLR-10439: The new 'large' attribute had been forgotten in /schema/fields?showDefaults=true + +* SOLR-10527: edismax with sow=false fails to create dismax-per-term queries when any field is boosted. + (Steve Rowe) + ================== 6.5.0 ================== Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release. 
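To make the sow note in the Upgrading section above concrete: a SolrJ client that still wants query text split on whitespace before analysis can send sow=true explicitly. This is only an illustrative sketch; the URL, collection and field names are made up, and only the parameter name and its new default come from the release notes.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class SowParamSketch {
  public static void main(String[] args) throws Exception {
    // hypothetical standalone Solr instance and collection
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
      SolrQuery q = new SolrQuery("wi fi");
      q.set("defType", "edismax");
      q.set("qf", "name");
      q.set("sow", true); // sow now defaults to false; opt back in to splitting on whitespace
      QueryResponse rsp = client.query(q);
      System.out.println("hits: " + rsp.getResults().getNumFound());
    }
  }
}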
diff --git a/solr/contrib/analytics/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java b/solr/contrib/analytics/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java index b22dcb5bdf42..1670ad5689a0 100644 --- a/solr/contrib/analytics/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java +++ b/solr/contrib/analytics/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java @@ -16,11 +16,11 @@ */ package org.apache.solr.analytics.plugin; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import com.codahale.metrics.Timer; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.util.stats.MetricUtils; public class AnalyticsStatisticsCollector { @@ -85,17 +85,20 @@ public void endRequest() { currentTimer.stop(); } - public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap<>(); - lst.add("requests", numRequests.longValue()); - lst.add("analyticsRequests", numAnalyticsRequests.longValue()); - lst.add("statsRequests", numStatsRequests.longValue()); - lst.add("statsCollected", numCollectedStats.longValue()); - lst.add("fieldFacets", numFieldFacets.longValue()); - lst.add("rangeFacets", numRangeFacets.longValue()); - lst.add("queryFacets", numQueryFacets.longValue()); - lst.add("queriesInQueryFacets", numQueries.longValue()); - MetricUtils.addMetrics(lst, requestTimes); - return lst; + public Map getStatistics() { + + Map map = new HashMap<>(); + MetricUtils.convertTimer("", requestTimes, MetricUtils.PropertyFilter.ALL, false, false, (k, v) -> { + map.putAll((Map)v); + }); + map.put("requests", numRequests.longValue()); + map.put("analyticsRequests", numAnalyticsRequests.longValue()); + map.put("statsRequests", numStatsRequests.longValue()); + map.put("statsCollected", numCollectedStats.longValue()); + map.put("fieldFacets", numFieldFacets.longValue()); + map.put("rangeFacets", numRangeFacets.longValue()); + map.put("queryFacets", numQueryFacets.longValue()); + map.put("queriesInQueryFacets", numQueries.longValue()); + return map; } } diff --git a/solr/contrib/analytics/src/java/org/apache/solr/handler/component/AnalyticsComponent.java b/solr/contrib/analytics/src/java/org/apache/solr/handler/component/AnalyticsComponent.java index f33b6c7a6dc8..505533b1cc5a 100644 --- a/solr/contrib/analytics/src/java/org/apache/solr/handler/component/AnalyticsComponent.java +++ b/solr/contrib/analytics/src/java/org/apache/solr/handler/component/AnalyticsComponent.java @@ -22,9 +22,11 @@ import org.apache.solr.analytics.request.AnalyticsStats; import org.apache.solr.analytics.util.AnalyticsParams; import org.apache.solr.common.params.SolrParams; -import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; -public class AnalyticsComponent extends SearchComponent { +public class AnalyticsComponent extends SearchComponent implements SolrMetricProducer { public static final String COMPONENT_NAME = "analytics"; private final AnalyticsStatisticsCollector analyticsCollector = new AnalyticsStatisticsCollector();; @@ -80,12 +82,8 @@ public String getDescription() { } @Override - public String getVersion() { - return getClass().getPackage().getSpecificationVersion(); - } - - @Override - public NamedList getStatistics() { - return analyticsCollector.getStatistics(); + public void 
initializeMetrics(SolrMetricManager manager, String registry, String scope) { + MetricsMap metrics = new MetricsMap((detailed, map) -> map.putAll(analyticsCollector.getStatistics())); + manager.registerGauge(this, registry, metrics, true, getClass().getSimpleName(), getCategory().toString(), scope); } } diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java index 0766c7f838e8..faea3baab183 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java @@ -26,12 +26,13 @@ import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.ContentStreamBase; import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.ContentStream; import org.apache.solr.common.util.StrUtils; import org.apache.solr.core.SolrCore; import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.handler.RequestHandlerBase; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.response.RawResponseWriter; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; @@ -74,6 +75,8 @@ public class DataImportHandler extends RequestHandlerBase implements private String myName = "dataimport"; + private MetricsMap metrics; + private static final String PARAM_WRITER_IMPL = "writerImpl"; private static final String DEFAULT_WRITER_NAME = "SolrWriter"; @@ -260,41 +263,33 @@ public boolean upload(SolrInputDocument document) { }; } } - - @Override - @SuppressWarnings("unchecked") - public NamedList getStatistics() { - if (importer == null) - return super.getStatistics(); - DocBuilder.Statistics cumulative = importer.cumulativeStatistics; - SimpleOrderedMap result = new SimpleOrderedMap(); - - result.add("Status", importer.getStatus().toString()); + @Override + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + super.initializeMetrics(manager, registryName, scope); + metrics = new MetricsMap((detailed, map) -> { + if (importer != null) { + DocBuilder.Statistics cumulative = importer.cumulativeStatistics; - if (importer.docBuilder != null) { - DocBuilder.Statistics running = importer.docBuilder.importStatistics; - result.add("Documents Processed", running.docCount); - result.add("Requests made to DataSource", running.queryCount); - result.add("Rows Fetched", running.rowsCount); - result.add("Documents Deleted", running.deletedDocCount); - result.add("Documents Skipped", running.skipDocCount); - } + map.put("Status", importer.getStatus().toString()); - result.add(DataImporter.MSG.TOTAL_DOC_PROCESSED, cumulative.docCount); - result.add(DataImporter.MSG.TOTAL_QUERIES_EXECUTED, cumulative.queryCount); - result.add(DataImporter.MSG.TOTAL_ROWS_EXECUTED, cumulative.rowsCount); - result.add(DataImporter.MSG.TOTAL_DOCS_DELETED, cumulative.deletedDocCount); - result.add(DataImporter.MSG.TOTAL_DOCS_SKIPPED, cumulative.skipDocCount); + if (importer.docBuilder != null) { + DocBuilder.Statistics running = importer.docBuilder.importStatistics; + map.put("Documents Processed", running.docCount); + map.put("Requests made to DataSource", running.queryCount); + map.put("Rows Fetched", 
running.rowsCount); + map.put("Documents Deleted", running.deletedDocCount); + map.put("Documents Skipped", running.skipDocCount); + } - NamedList requestStatistics = super.getStatistics(); - if (requestStatistics != null) { - for (int i = 0; i < requestStatistics.size(); i++) { - result.add(requestStatistics.getName(i), requestStatistics.getVal(i)); + map.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, cumulative.docCount); + map.put(DataImporter.MSG.TOTAL_QUERIES_EXECUTED, cumulative.queryCount); + map.put(DataImporter.MSG.TOTAL_ROWS_EXECUTED, cumulative.rowsCount); + map.put(DataImporter.MSG.TOTAL_DOCS_DELETED, cumulative.deletedDocCount); + map.put(DataImporter.MSG.TOTAL_DOCS_SKIPPED, cumulative.skipDocCount); } - } - - return result; + }); + manager.registerGauge(this, registryName, metrics, true, "importer", getCategory().toString(), scope); } // //////////////////////SolrInfoMBeans methods ////////////////////// diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java index 2f5e9b0bd675..a49b4f6285bf 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java @@ -288,8 +288,7 @@ public DIHConfiguration readFromXml(Document xmlDocument) { if (propertyWriterTags.isEmpty()) { boolean zookeeper = false; if (this.core != null - && this.core.getCoreDescriptor().getCoreContainer() - .isZooKeeperAware()) { + && this.core.getCoreContainer().isZooKeeperAware()) { zookeeper = true; } pw = new PropertyWriter(zookeeper ? "ZKPropertiesWriter" diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java index a3d4756a186c..f6a62aa7df42 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java @@ -314,7 +314,7 @@ private void finish(Map lastIndexTimeProps) { } void handleError(String message, Exception e) { - if (!dataImporter.getCore().getCoreDescriptor().getCoreContainer().isZooKeeperAware()) { + if (!dataImporter.getCore().getCoreContainer().isZooKeeperAware()) { writer.rollback(); } diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ZKPropertiesWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ZKPropertiesWriter.java index 2d548726577c..64a776c9ccc9 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ZKPropertiesWriter.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ZKPropertiesWriter.java @@ -44,7 +44,7 @@ public class ZKPropertiesWriter extends SimplePropertiesWriter { @Override public void init(DataImporter dataImporter, Map params) { super.init(dataImporter, params); - zkClient = dataImporter.getCore().getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + zkClient = dataImporter.getCore().getCoreContainer().getZkController().getZkClient(); } @Override diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java b/solr/contrib/velocity/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java index a659d0c64397..c83a5a7bb5d3 
100644 --- a/solr/contrib/velocity/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java +++ b/solr/contrib/velocity/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java @@ -16,15 +16,15 @@ */ package org.apache.solr.response; -import org.apache.velocity.runtime.resource.loader.ResourceLoader; -import org.apache.velocity.runtime.resource.Resource; -import org.apache.velocity.exception.ResourceNotFoundException; -import org.apache.commons.collections.ExtendedProperties; -import org.apache.solr.core.SolrResourceLoader; - import java.io.IOException; import java.io.InputStream; +import org.apache.commons.collections.ExtendedProperties; +import org.apache.solr.core.SolrResourceLoader; +import org.apache.velocity.exception.ResourceNotFoundException; +import org.apache.velocity.runtime.resource.Resource; +import org.apache.velocity.runtime.resource.loader.ResourceLoader; + /** * Velocity resource loader wrapper around Solr resource loader */ diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java index 8de5fc92fafa..948452e4f4e5 100644 --- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java +++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java @@ -89,7 +89,7 @@ private static CoreContainer load(CoreContainer cc) { * Create an EmbeddedSolrServer wrapping a particular SolrCore */ public EmbeddedSolrServer(SolrCore core) { - this(core.getCoreDescriptor().getCoreContainer(), core.getName()); + this(core.getCoreContainer(), core.getName()); } /** diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java b/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java index fdc7b02dae57..719b1d171613 100644 --- a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java +++ b/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java @@ -21,6 +21,7 @@ import java.util.Properties; import com.google.common.base.Strings; +import org.apache.solr.common.StringUtils; import org.apache.solr.common.cloud.Replica; import org.apache.solr.core.CoreDescriptor; import org.apache.solr.util.PropertiesUtil; @@ -135,4 +136,23 @@ public void setCoreNodeName(String nodeName) { if(nodeName==null) cd.getPersistableStandardProperties().remove(CoreDescriptor.CORE_NODE_NAME); else cd.getPersistableStandardProperties().setProperty(CoreDescriptor.CORE_NODE_NAME, nodeName); } + + public void reload(CloudDescriptor reloadFrom) { + if (reloadFrom == null) return; + + setShardId(StringUtils.isEmpty(reloadFrom.getShardId()) ? getShardId() : reloadFrom.getShardId()); + setCollectionName(StringUtils.isEmpty(reloadFrom.getCollectionName()) ? getCollectionName() : reloadFrom.getCollectionName()); + setRoles(StringUtils.isEmpty(reloadFrom.getRoles()) ? getRoles() : reloadFrom.getRoles()); + if (reloadFrom.getNumShards() != null) { + setNumShards(reloadFrom.getNumShards()); + } + setCoreNodeName(StringUtils.isEmpty(reloadFrom.getCoreNodeName()) ? 
getCoreNodeName() : reloadFrom.getCoreNodeName()); + setLeader(reloadFrom.isLeader); + setHasRegistered(reloadFrom.hasRegistered); + setLastPublished(reloadFrom.getLastPublished()); + + for (Map.Entry ent : reloadFrom.getParams().entrySet()) { + collectionParams.put(ent.getKey(), ent.getValue()); + } + } } diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java index c62efa6a694c..ac09621b03c3 100644 --- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java +++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java @@ -68,7 +68,7 @@ public static void checkSharedFSFailoverReplaced(CoreContainer cc, CoreDescripto if (thisCnn != null && thisCnn.equals(cnn) && !thisBaseUrl.equals(baseUrl)) { - if (cc.getCoreNames().contains(desc.getName())) { + if (cc.getLoadedCoreNames().contains(desc.getName())) { cc.unload(desc.getName()); } diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java b/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java index e7ac5e5fd161..6c28cc69c3fe 100644 --- a/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java +++ b/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java @@ -86,10 +86,9 @@ public class DistributedQueue { */ private final Condition changed = updateLock.newCondition(); - /** - * If non-null, the last watcher to listen for child changes. If null, the in-memory contents are dirty. - */ - private ChildWatcher lastWatcher = null; + private boolean isDirty = true; + + private int watcherCount = 0; public DistributedQueue(SolrZkClient zookeeper, String dir) { this(zookeeper, dir, new Overseer.Stats()); @@ -238,10 +237,10 @@ public void offer(byte[] data) throws KeeperException, InterruptedException { try { while (true) { try { - // We don't need to explicitly set isDirty here; if there is a watcher, it will - // see the update and set the bit itself; if there is no watcher we can defer - // the update anyway. + // Explicitly set isDirty here so that synchronous same-thread calls behave as expected. + // This will get set again when the watcher actually fires, but that's ok. zookeeper.create(dir + "/" + PREFIX, data, CreateMode.PERSISTENT_SEQUENTIAL, true); + isDirty = true; return; } catch (KeeperException.NoNodeException e) { try { @@ -269,15 +268,25 @@ public Overseer.Stats getStats() { private String firstChild(boolean remove) throws KeeperException, InterruptedException { updateLock.lockInterruptibly(); try { - // If we're not in a dirty state, and we have in-memory children, return from in-memory. - if (lastWatcher != null && !knownChildren.isEmpty()) { - return remove ? knownChildren.pollFirst() : knownChildren.first(); + if (!isDirty) { + // If we're not in a dirty state... + if (!knownChildren.isEmpty()) { + // and we have in-memory children, return from in-memory. + return remove ? knownChildren.pollFirst() : knownChildren.first(); + } else { + // otherwise there's nothing to return + return null; + } } - // Try to fetch an updated list of children from ZK. - ChildWatcher newWatcher = new ChildWatcher(); + // Dirty, try to fetch an updated list of children from ZK. + // Only set a new watcher if there isn't already a watcher. + ChildWatcher newWatcher = (watcherCount == 0) ? 
new ChildWatcher() : null; knownChildren = fetchZkChildren(newWatcher); - lastWatcher = newWatcher; // only set after fetchZkChildren returns successfully + if (newWatcher != null) { + watcherCount++; // watcher was successfully set + } + isDirty = false; if (knownChildren.isEmpty()) { return null; } @@ -422,16 +431,25 @@ private byte[] removeFirst() throws KeeperException, InterruptedException { } } - @VisibleForTesting boolean hasWatcher() throws InterruptedException { + @VisibleForTesting int watcherCount() throws InterruptedException { updateLock.lockInterruptibly(); try { - return lastWatcher != null; + return watcherCount; } finally { updateLock.unlock(); } } - private class ChildWatcher implements Watcher { + @VisibleForTesting boolean isDirty() throws InterruptedException { + updateLock.lockInterruptibly(); + try { + return isDirty; + } finally { + updateLock.unlock(); + } + } + + @VisibleForTesting class ChildWatcher implements Watcher { @Override public void process(WatchedEvent event) { @@ -441,10 +459,8 @@ public void process(WatchedEvent event) { } updateLock.lock(); try { - // this watcher is automatically cleared when fired - if (lastWatcher == this) { - lastWatcher = null; - } + isDirty = true; + watcherCount--; // optimistically signal any waiters that the queue may not be empty now, so they can wake up and retry changed.signalAll(); } finally { diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java index 6e8dbdabf81e..bdbeca9d568c 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java +++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java @@ -292,7 +292,7 @@ void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws Kee if (cc.isShutDown()) { return; } else { - throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getCoreNames()); + throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames()); } } MDCLoggingContext.setCore(core); @@ -332,7 +332,7 @@ void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws Kee if (!zkController.getCoreContainer().isShutDown()) { cancelElection(); throw new SolrException(ErrorCode.SERVER_ERROR, - "SolrCore not found:" + coreName + " in " + cc.getCoreNames()); + "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames()); } else { return; } @@ -402,7 +402,7 @@ void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws Kee RefCounted searchHolder = core.getNewestSearcher(false); SolrIndexSearcher searcher = searchHolder.get(); try { - log.debug(core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName() + " synched " + log.debug(core.getCoreContainer().getZkController().getNodeName() + " synched " + searcher.search(new MatchAllDocsQuery(), 1).totalHits); } finally { searchHolder.decref(); @@ -462,7 +462,7 @@ void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws Kee try (SolrCore core = cc.getCore(coreName)) { if (core == null) { - log.debug("SolrCore not found:" + coreName + " in " + cc.getCoreNames()); + log.debug("SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames()); return; } diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java index 2cbc39456077..496d0826d705 100644 --- 
a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java +++ b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java @@ -233,7 +233,7 @@ final private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderp SolrIndexSearcher searcher = searchHolder.get(); Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null); try { - LOG.debug(core.getCoreDescriptor().getCoreContainer() + LOG.debug(core.getCoreContainer() .getZkController().getNodeName() + " replicated " + searcher.search(new MatchAllDocsQuery(), 1).totalHits @@ -641,7 +641,7 @@ final private void cloudDebugLog(SolrCore core, String op) { SolrIndexSearcher searcher = searchHolder.get(); try { final int totalHits = searcher.search(new MatchAllDocsQuery(), 1).totalHits; - final String nodeName = core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName(); + final String nodeName = core.getCoreContainer().getZkController().getNodeName(); LOG.debug("[{}] {} [{} total hits]", nodeName, op, totalHits); } finally { searchHolder.decref(); diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java b/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java index d7fded907a92..817b371f7362 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java +++ b/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java @@ -55,7 +55,7 @@ public void startReplication() throws InterruptedException { if (cc.isShutDown()) { return; } else { - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getCoreNames()); + throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames()); } } SolrConfig.UpdateHandlerInfo uinfo = core.getSolrConfig().getUpdateHandlerInfo(); diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java index 677bf2906213..b337bd022cae 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java +++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java @@ -1168,7 +1168,7 @@ public void publish(final CoreDescriptor cd, final Replica.State state, boolean MDCLoggingContext.setCore(core); } } else { - MDCLoggingContext.setCoreDescriptor(cd); + MDCLoggingContext.setCoreDescriptor(cc, cd); } try { String collection = cd.getCloudDescriptor().getCollectionName(); diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java b/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java index 00e27c3a809c..a4e998d9e38e 100644 --- a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java +++ b/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java @@ -45,7 +45,7 @@ public Map invoke(SolrQueryRequest req) { Map result = new HashMap<>(); CoreContainer cc = (CoreContainer) req.getContext().get(CoreContainer.class.getName()); if (req.getParams().getInt(CORES, -1) == 1) { - result.put(CORES, cc.getCoreNames().size()); + result.put(CORES, cc.getLoadedCoreNames().size()); } if (req.getParams().getInt(DISK, -1) == 1) { try { diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java index 1ef036aa2325..28c1eafee3f0 100644 --- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java +++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java @@ -81,6 +81,7 @@ import 
org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.request.SolrRequestHandler; +import org.apache.solr.search.SolrFieldCacheBean; import org.apache.solr.security.AuthenticationPlugin; import org.apache.solr.security.AuthorizationPlugin; import org.apache.solr.security.HttpClientBuilderPlugin; @@ -103,6 +104,7 @@ import static org.apache.solr.common.params.CommonParams.INFO_HANDLER_PATH; import static org.apache.solr.common.params.CommonParams.METRICS_PATH; import static org.apache.solr.common.params.CommonParams.ZK_PATH; +import static org.apache.solr.core.CorePropertiesLocator.PROPERTIES_FILENAME; import static org.apache.solr.security.AuthenticationPlugin.AUTHENTICATION_PLUGIN_PROP; /** @@ -121,7 +123,7 @@ public static class CoreLoadFailure { public final Exception exception; public CoreLoadFailure(CoreDescriptor cd, Exception loadFailure) { - this.cd = cd; + this.cd = new CoreDescriptor(cd.getName(), cd); this.exception = loadFailure; } } @@ -213,8 +215,6 @@ public PluginBag getRequestHandlers() { return this.containerHandlers; } - // private ClientConnectionManager clientConnectionManager = new PoolingClientConnectionManager(); - { log.debug("New CoreContainer " + System.identityHashCode(this)); } @@ -482,18 +482,18 @@ public void load() { metricManager = new SolrMetricManager(); coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService( - coreContainerWorkExecutor, - metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node)), - SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoMBean.Category.CONTAINER.toString(), "threadPool")); + coreContainerWorkExecutor, null, + metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)), + SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool")); shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader); if (shardHandlerFactory instanceof SolrMetricProducer) { SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory; - metricProducer.initializeMetrics(metricManager, SolrInfoMBean.Group.node.toString(), "httpShardHandler"); + metricProducer.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "httpShardHandler"); } updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig()); - updateShardHandler.initializeMetrics(metricManager, SolrInfoMBean.Group.node.toString(), "updateShardHandler"); + updateShardHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "updateShardHandler"); transientCoreCache = TransientSolrCoreCacheFactory.newInstance(loader, this); @@ -520,14 +520,14 @@ public void load() { // may want to add some configuration here in the future metricsCollectorHandler.init(null); containerHandlers.put(AUTHZ_PATH, securityConfHandler); - securityConfHandler.initializeMetrics(metricManager, SolrInfoMBean.Group.node.toString(), AUTHZ_PATH); + securityConfHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), AUTHZ_PATH); containerHandlers.put(AUTHC_PATH, securityConfHandler); if(pkiAuthenticationPlugin != null) containerHandlers.put(PKIAuthenticationPlugin.PATH, pkiAuthenticationPlugin.getRequestHandler()); - metricManager.loadReporters(cfg.getMetricReporterPlugins(), loader, null, SolrInfoMBean.Group.node); - metricManager.loadReporters(cfg.getMetricReporterPlugins(), loader, null, SolrInfoMBean.Group.jvm); - 
metricManager.loadReporters(cfg.getMetricReporterPlugins(), loader, null, SolrInfoMBean.Group.jetty); + metricManager.loadReporters(cfg.getMetricReporterPlugins(), loader, null, SolrInfoBean.Group.node); + metricManager.loadReporters(cfg.getMetricReporterPlugins(), loader, null, SolrInfoBean.Group.jvm); + metricManager.loadReporters(cfg.getMetricReporterPlugins(), loader, null, SolrInfoBean.Group.jetty); coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController); @@ -535,17 +535,25 @@ public void load() { // initialize gauges for reporting the number of cores and disk total/free - String registryName = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node); - metricManager.registerGauge(registryName, () -> solrCores.getCores().size(), - true, "loaded", SolrInfoMBean.Category.CONTAINER.toString(), "cores"); - metricManager.registerGauge(registryName, () -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(), - true, "lazy",SolrInfoMBean.Category.CONTAINER.toString(), "cores"); - metricManager.registerGauge(registryName, () -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(), - true, "unloaded",SolrInfoMBean.Category.CONTAINER.toString(), "cores"); - metricManager.registerGauge(registryName, () -> cfg.getCoreRootDirectory().toFile().getTotalSpace(), - true, "totalSpace", SolrInfoMBean.Category.CONTAINER.toString(), "fs"); - metricManager.registerGauge(registryName, () -> cfg.getCoreRootDirectory().toFile().getUsableSpace(), - true, "usableSpace", SolrInfoMBean.Category.CONTAINER.toString(), "fs"); + String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node); + metricManager.registerGauge(null, registryName, () -> solrCores.getCores().size(), + true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores"); + metricManager.registerGauge(null, registryName, () -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(), + true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores"); + metricManager.registerGauge(null, registryName, () -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(), + true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores"); + metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getTotalSpace(), + true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs"); + metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getUsableSpace(), + true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs"); + // add version information + metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getSpecificationVersion(), + true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version"); + metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getImplementationVersion(), + true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version"); + + SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean(); + fieldCacheBean.initializeMetrics(metricManager, registryName, null); if (isZooKeeperAware()) { metricManager.loadClusterReporters(cfg.getMetricReporterPlugins(), this); @@ -555,9 +563,9 @@ public void load() { ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService( ExecutorUtil.newMDCAwareFixedThreadPool( cfg.getCoreLoadThreadCount(isZooKeeperAware()), - new DefaultSolrThreadFactory("coreLoadExecutor")), - 
metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node)), - SolrMetricManager.mkName("coreLoadExecutor",SolrInfoMBean.Category.CONTAINER.toString(), "threadPool")); + new DefaultSolrThreadFactory("coreLoadExecutor")), null, + metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)), + SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool")); final List> futures = new ArrayList<>(); try { List cds = coresLocator.discover(this); @@ -571,7 +579,7 @@ public void load() { for (final CoreDescriptor cd : cds) { if (cd.isTransient() || !cd.isLoadOnStartup()) { - solrCores.putDynamicDescriptor(cd.getName(), cd); + getTransientCacheHandler().addTransientDescriptor(cd.getName(), cd); } else if (asyncSolrCoreLoad) { solrCores.markCoreAsLoading(cd); } @@ -685,14 +693,16 @@ public void shutdown() { ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor); if (metricManager != null) { - metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node)); + metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)); + metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm)); + metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty)); } if (isZooKeeperAware()) { cancelCoreRecoveries(); zkSys.zkController.publishNodeAsDown(zkSys.zkController.getNodeName()); if (metricManager != null) { - metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.cluster)); + metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.cluster)); } } @@ -804,50 +814,35 @@ public CoresLocator getCoresLocator() { return coresLocator; } - protected SolrCore registerCore(String name, SolrCore core, boolean registerInZk, boolean skipRecovery) { + protected SolrCore registerCore(CoreDescriptor cd, SolrCore core, boolean registerInZk, boolean skipRecovery) { if( core == null ) { throw new RuntimeException( "Can not register a null core." ); } - - // We can register a core when creating them via the admin UI, so we need to ensure that the dynamic descriptors - // are up to date - CoreDescriptor cd = core.getCoreDescriptor(); - if ((cd.isTransient() || ! cd.isLoadOnStartup()) - && solrCores.getDynamicDescriptor(name) == null) { - // Store it away for later use. includes non-transient but not - // loaded at startup cores. - solrCores.putDynamicDescriptor(name, cd); - } - - SolrCore old; - + if (isShutDown) { core.close(); throw new IllegalStateException("This CoreContainer has been closed"); } - if (cd.isTransient()) { - old = solrCores.putTransientCore(cfg, name, core, loader); - } else { - old = solrCores.putCore(name, core); - } + SolrCore old = solrCores.putCore(cd, core); /* * set both the name of the descriptor and the name of the * core, since the descriptors name is used for persisting. 
*/ - core.setName(name); + solrCores.addCoreDescriptor(new CoreDescriptor(cd.getName(), cd)); + core.setName(cd.getName()); - coreInitFailures.remove(name); + coreInitFailures.remove(cd.getName()); if( old == null || old == core) { - log.debug( "registering core: "+name ); + log.debug( "registering core: " + cd.getName() ); if (registerInZk) { zkSys.registerInZk(core, false, skipRecovery); } return null; } else { - log.debug( "replacing core: "+name ); + log.debug( "replacing core: " + cd.getName() ); old.close(); if (registerInZk) { zkSys.registerInZk(core, false, skipRecovery); @@ -875,10 +870,10 @@ public SolrCore create(String coreName, Map parameters) { */ public SolrCore create(String coreName, Path instancePath, Map parameters, boolean newCollection) { - CoreDescriptor cd = new CoreDescriptor(this, coreName, instancePath, parameters); + CoreDescriptor cd = new CoreDescriptor(coreName, instancePath, parameters, getContainerProperties(), isZooKeeperAware()); // TODO: There's a race here, isn't there? - if (getAllCoreNames().contains(coreName)) { + if (getLoadedCoreNames().contains(coreName)) { log.warn("Creating a core with existing name is not allowed"); // TODO: Shouldn't this be a BAD_REQUEST? throw new SolrException(ErrorCode.SERVER_ERROR, "Core with name '" + coreName + "' already exists."); @@ -951,7 +946,7 @@ private SolrCore create(CoreDescriptor dcore, boolean publishState, boolean newC SolrCore core = null; try { - MDCLoggingContext.setCoreDescriptor(dcore); + MDCLoggingContext.setCoreDescriptor(this, dcore); SolrIdentifierValidator.validateCoreName(dcore.getName()); if (zkSys.getZkController() != null) { zkSys.getZkController().preRegister(dcore); @@ -961,7 +956,7 @@ private SolrCore create(CoreDescriptor dcore, boolean publishState, boolean newC dcore.setConfigSetTrusted(coreConfig.isTrusted()); log.info("Creating SolrCore '{}' using configuration from {}, trusted={}", dcore.getName(), coreConfig.getName(), dcore.isConfigSetTrusted()); try { - core = new SolrCore(dcore, coreConfig); + core = new SolrCore(this, dcore, coreConfig); } catch (SolrException e) { core = processCoreCreateException(e, dcore, coreConfig); } @@ -971,7 +966,7 @@ private SolrCore create(CoreDescriptor dcore, boolean publishState, boolean newC core.getUpdateHandler().getUpdateLog().recoverFromLog(); } - registerCore(dcore.getName(), core, publishState, newCollection); + registerCore(dcore, core, publishState, newCollection); return core; } catch (Exception e) { @@ -1035,7 +1030,7 @@ private SolrCore processCoreCreateException(SolrException original, CoreDescript if (leader != null && leader.getState() == State.ACTIVE) { log.info("Found active leader, will attempt to create fresh core and recover."); resetIndexDirectory(dcore, coreConfig); - return new SolrCore(dcore, coreConfig); + return new SolrCore(this, dcore, coreConfig); } } catch (SolrException se) { se.addSuppressed(original); @@ -1058,7 +1053,7 @@ private SolrCore processCoreCreateException(SolrException original, CoreDescript private void resetIndexDirectory(CoreDescriptor dcore, ConfigSet coreConfig) { SolrConfig config = coreConfig.getSolrConfig(); - String registryName = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, dcore.getName()); + String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, dcore.getName()); DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(config, this, registryName); String dataDir = SolrCore.findDataDir(df, null, config, dcore); @@ -1091,7 +1086,7 @@ public Collection 
getCores() { /** * @return a Collection of the names that loaded cores are mapped to */ - public Collection getCoreNames() { + public Collection getLoadedCoreNames() { return solrCores.getLoadedCoreNames(); } @@ -1136,7 +1131,37 @@ public Map getCoreInitFailures() { } - // ---------------- Core name related methods --------------- + // ---------------- Core name related methods --------------- + + private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) { + if (oldDesc == null) { + return null; + } + + CorePropertiesLocator cpl = new CorePropertiesLocator(null); + CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this); + + // Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties + // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world + // of core discovery without writing the core.properties file out first. + // + // TODO: remove core.properties from the conf directory in test files, it's in a bad place there anyway. + if (ret == null) { + oldDesc.loadExtraProperties(); // there may be changes to extra properties that we need to pick up. + return oldDesc; + + } + // The CloudDescriptor bit here is created in a very convoluted way, requiring access to private methods + // in ZkController. When reloading, this behavior is identical to what used to happen where a copy of the old + // CoreDescriptor was just re-used. + + if (ret.getCloudDescriptor() != null) { + ret.getCloudDescriptor().reload(oldDesc.getCloudDescriptor()); + } + + return ret; + } + /** * Recreates a SolrCore. * While the new core is loading, requests will continue to be dispatched to @@ -1147,13 +1172,17 @@ public Map getCoreInitFailures() { public void reload(String name) { SolrCore core = solrCores.getCoreFromAnyList(name, false); if (core != null) { - CoreDescriptor cd = core.getCoreDescriptor(); + + // The underlying core properties files may have changed, we don't really know. 
So we have a (perhaps) stale + // CoreDescriptor and we need to reload it from the disk files + CoreDescriptor cd = reloadCoreDescriptor(core.getCoreDescriptor()); + solrCores.addCoreDescriptor(cd); try { solrCores.waitAddPendingCoreOps(cd.getName()); ConfigSet coreConfig = coreConfigService.getConfig(cd); log.info("Reloading SolrCore '{}' using configuration from {}", cd.getName(), coreConfig.getName()); SolrCore newCore = core.reload(coreConfig); - registerCore(cd.getName(), newCore, false, false); + registerCore(cd, newCore, false, false); if (getZkController() != null) { boolean onlyLeaderIndexes = getZkController().getClusterState().getCollection(cd.getCollectionName()).getRealtimeReplicas() == 1; if (onlyLeaderIndexes && !cd.getCloudDescriptor().isLeader()) { @@ -1232,7 +1261,6 @@ public void unload(String name, boolean deleteIndexDir, boolean deleteDataDir, b boolean close = solrCores.isLoadedNotPendingClose(name); SolrCore core = solrCores.remove(name); coresLocator.delete(this, cd); - if (core == null) { // transient core SolrCore.deleteUnloadedCore(cd, deleteDataDir, deleteInstanceDir); @@ -1247,7 +1275,7 @@ public void unload(String name, boolean deleteIndexDir, boolean deleteDataDir, b core.getSolrCoreState().cancelRecovery(); } - core.unloadOnClose(deleteIndexDir, deleteDataDir, deleteInstanceDir); + core.unloadOnClose(cd, deleteIndexDir, deleteDataDir, deleteInstanceDir); if (close) core.closeAndWait(); @@ -1261,6 +1289,9 @@ public void unload(String name, boolean deleteIndexDir, boolean deleteDataDir, b throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e); } } + if (deleteInstanceDir) { // we aren't going to reload this if we delete the instance dir. + solrCores.removeCoreDescriptor(cd); + } } public void rename(String name, String toName) { @@ -1270,8 +1301,15 @@ public void rename(String name, String toName) { String oldRegistryName = core.getCoreMetricManager().getRegistryName(); String newRegistryName = SolrCoreMetricManager.createRegistryName(core, toName); metricManager.swapRegistries(oldRegistryName, newRegistryName); - registerCore(toName, core, true, false); + // The old coreDescriptor is obsolete, so remove it. registerCore will put it back. + CoreDescriptor cd = core.getCoreDescriptor(); + solrCores.removeCoreDescriptor(cd); + cd.setProperty("name", toName); + solrCores.addCoreDescriptor(cd); + core.setName(toName); + registerCore(cd, core, true, false); SolrCore old = solrCores.remove(name); + coresLocator.rename(this, old.getCoreDescriptor(), core.getCoreDescriptor()); } } @@ -1286,12 +1324,7 @@ public List getCoreDescriptors() { } public CoreDescriptor getCoreDescriptor(String coreName) { - // TODO make this less hideous! - for (CoreDescriptor cd : getCoreDescriptors()) { - if (cd.getName().equals(coreName)) - return cd; - } - return null; + return solrCores.getCoreDescriptor(coreName); } public Path getCoreRootDirectory() { @@ -1311,29 +1344,32 @@ public SolrCore getCore(String name) { // Do this in two phases since we don't want to lock access to the cores over a load. SolrCore core = solrCores.getCoreFromAnyList(name, true); + // If a core is loaded, we're done just return it. if (core != null) { return core; } - // OK, it's not presently in any list, is it in the list of dynamic cores but not loaded yet? If so, load it. 
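The renamed accessors behave like the old ones but only cover loaded cores, while descriptors are now tracked separately by SolrCores. A minimal sketch of hypothetical calling code against the new CoreContainer surface (the class and variable names are illustrative and not part of this patch):

    import java.util.Collection;
    import org.apache.solr.core.CoreContainer;
    import org.apache.solr.core.CoreDescriptor;
    import org.apache.solr.core.SolrCore;

    class CoreLookupExample {
      // "cc" is an existing CoreContainer obtained from the enclosing component.
      static void touchCore(CoreContainer cc, String name) {
        Collection<String> loaded = cc.getLoadedCoreNames(); // formerly getCoreNames()
        CoreDescriptor cd = cc.getCoreDescriptor(name);      // now a direct lookup, no linear scan
        if (cd == null) {
          return; // no such core is known to this container
        }
        boolean wasAlreadyLoaded = loaded.contains(name);
        // getCore() may still lazily load a transient core, or throw
        // SolrCoreInitializationException if an earlier load failed.
        try (SolrCore core = cc.getCore(cd.getName())) {
          if (core != null && !wasAlreadyLoaded) {
            // the core was loaded on demand
          }
        }
      }
    }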
- CoreDescriptor desc = solrCores.getDynamicDescriptor(name); - if (desc == null) { //Nope, no transient core with this name + // If it's not yet loaded, we can check if it's had a core init failure and "do the right thing" + CoreDescriptor desc = solrCores.getCoreDescriptor(name); - // if there was an error initializing this core, throw a 500 - // error with the details for clients attempting to access it. - CoreLoadFailure loadFailure = getCoreInitFailures().get(name); - if (null != loadFailure) { - throw new SolrCoreInitializationException(name, loadFailure.exception); - } - // otherwise the user is simply asking for something that doesn't exist. - return null; + // if there was an error initializing this core, throw a 500 + // error with the details for clients attempting to access it. + CoreLoadFailure loadFailure = getCoreInitFailures().get(name); + if (null != loadFailure) { + throw new SolrCoreInitializationException(name, loadFailure.exception); } - + // This is a bit of awkwardness where SolrCloud and transient cores don't play nice together. For transient cores, + // we have to allow them to be created at any time there hasn't been a core load failure (use reload to cure that). + // But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to load the core if + // the core is null and there was an error. If you change this, be sure to run both TestConfiSetsAPI and + // TestLazyCores + if (desc == null || zkSys.getZkController() != null) return null; + // This will put an entry in pending core ops if the core isn't loaded core = solrCores.waitAddPendingCoreOps(name); if (isShutDown) return null; // We're quitting, so stop. This needs to be after the wait above since we may come off - // the wait as a consequence of shutting down. + // the wait as a consequence of shutting down. try { if (core == null) { if (zkSys.getZkController() != null) { @@ -1376,7 +1412,7 @@ protected T createHandler(String path, String handlerClass, Class clazz) containerHandlers.put(path, (SolrRequestHandler)handler); } if (handler instanceof SolrMetricProducer) { - ((SolrMetricProducer)handler).initializeMetrics(metricManager, SolrInfoMBean.Group.node.toString(), path); + ((SolrMetricProducer)handler).initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), path); } return handler; } diff --git a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java index 0dc2c71fe753..1747fa2ca12e 100644 --- a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java +++ b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java @@ -121,8 +121,6 @@ public Properties getPersistableUserProperties() { CloudDescriptor.NUM_SHARDS ); - private final CoreContainer coreContainer; - private final CloudDescriptor cloudDesc; private final Path instanceDir; @@ -139,8 +137,9 @@ public Properties getPersistableUserProperties() { /** The properties for this core, substitutable by resource loaders */ protected final Properties substitutableProperties = new Properties(); - public CoreDescriptor(CoreContainer container, String name, Path instanceDir, String... properties) { - this(container, name, instanceDir, toMap(properties)); + public CoreDescriptor(String name, Path instanceDir, Properties containerProperties, + boolean isZooKeeperAware, String... properties) { + this(name, instanceDir, toMap(properties), containerProperties, isZooKeeperAware); } private static Map toMap(String... 
properties) { @@ -154,12 +153,14 @@ private static Map toMap(String... properties) { /** * Create a new CoreDescriptor with a given name and instancedir - * @param container the CoreDescriptor's container * @param name the CoreDescriptor's name * @param instanceDir the CoreDescriptor's instancedir + * @param containerProperties the enclosing container properties for variable resolution + * @param isZooKeeperAware whether we are part of SolrCloud or not. */ - public CoreDescriptor(CoreContainer container, String name, Path instanceDir) { - this(container, name, instanceDir, Collections.emptyMap()); + public CoreDescriptor(String name, Path instanceDir, + Properties containerProperties, boolean isZooKeeperAware) { + this(name, instanceDir, Collections.emptyMap(), containerProperties, isZooKeeperAware); } /** @@ -168,7 +169,6 @@ public CoreDescriptor(CoreContainer container, String name, Path instanceDir) { * @param other the CoreDescriptor to copy */ public CoreDescriptor(String coreName, CoreDescriptor other) { - this.coreContainer = other.coreContainer; this.cloudDesc = other.cloudDesc; this.instanceDir = other.instanceDir; this.originalExtraProperties.putAll(other.originalExtraProperties); @@ -183,20 +183,20 @@ public CoreDescriptor(String coreName, CoreDescriptor other) { /** * Create a new CoreDescriptor. - * @param container the CoreDescriptor's container * @param name the CoreDescriptor's name * @param instanceDir a Path resolving to the instanceDir * @param coreProps a Map of the properties for this core + * @param containerProperties the properties from the enclosing container. + * @param isZooKeeperAware if true, we ar in SolrCloud mode. */ - public CoreDescriptor(CoreContainer container, String name, Path instanceDir, - Map coreProps) { - this.coreContainer = container; + + public CoreDescriptor(String name, Path instanceDir, Map coreProps, + Properties containerProperties, boolean isZooKeeperAware) { this.instanceDir = instanceDir; originalCoreProperties.setProperty(CORE_NAME, name); - Properties containerProperties = container.getContainerProperties(); name = PropertiesUtil.substituteProperty(checkPropertyIsNotEmpty(name, CORE_NAME), containerProperties); @@ -221,7 +221,7 @@ public CoreDescriptor(CoreContainer container, String name, Path instanceDir, buildSubstitutableProperties(); // TODO maybe make this a CloudCoreDescriptor subclass? - if (container.isZooKeeperAware()) { + if (isZooKeeperAware) { cloudDesc = new CloudDescriptor(name, coreProperties, this); } else { @@ -324,15 +324,19 @@ public String getSchemaName() { public String getName() { return coreProperties.getProperty(CORE_NAME); } + + public void setProperty(String prop, String val) { + if (substitutableProperties.containsKey(prop)) { + substitutableProperties.setProperty(prop, val); + return; + } + coreProperties.setProperty(prop, val); + } public String getCollectionName() { return cloudDesc == null ? 
null : cloudDesc.getCollectionName(); } - public CoreContainer getCoreContainer() { - return coreContainer; - } - public CloudDescriptor getCloudDescriptor() { return cloudDesc; } diff --git a/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java b/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java index b37402b97e27..e942c9b90eec 100644 --- a/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java +++ b/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java @@ -134,8 +134,10 @@ public List discover(final CoreContainer cc) { public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (file.getFileName().toString().equals(PROPERTIES_FILENAME)) { CoreDescriptor cd = buildCoreDescriptor(file, cc); - logger.debug("Found core {} in {}", cd.getName(), cd.getInstanceDir()); - cds.add(cd); + if (cd != null) { + logger.debug("Found core {} in {}", cd.getName(), cd.getInstanceDir()); + cds.add(cd); + } return FileVisitResult.SKIP_SIBLINGS; } return FileVisitResult.CONTINUE; @@ -174,7 +176,9 @@ protected CoreDescriptor buildCoreDescriptor(Path propertiesFile, CoreContainer for (String key : coreProperties.stringPropertyNames()) { propMap.put(key, coreProperties.getProperty(key)); } - return new CoreDescriptor(cc, name, instanceDir, propMap); + CoreDescriptor ret = new CoreDescriptor(name, instanceDir, propMap, cc.getContainerProperties(), cc.isZooKeeperAware()); + ret.loadExtraProperties(); + return ret; } catch (IOException e) { logger.error("Couldn't load core descriptor from {}:{}", propertiesFile, e.toString()); diff --git a/solr/core/src/java/org/apache/solr/core/CoresLocator.java b/solr/core/src/java/org/apache/solr/core/CoresLocator.java index d4f40cd414e4..52927f1e3b63 100644 --- a/solr/core/src/java/org/apache/solr/core/CoresLocator.java +++ b/solr/core/src/java/org/apache/solr/core/CoresLocator.java @@ -68,5 +68,4 @@ public interface CoresLocator { * @return a list of all CoreDescriptors found */ public List discover(CoreContainer cc); - } diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java index cc24e6c6ab34..20824ab29c5d 100644 --- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java +++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java @@ -24,7 +24,6 @@ import java.lang.invoke.MethodHandles; import java.nio.file.NoSuchFileException; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.List; @@ -321,13 +320,6 @@ public String getDataHome(CoreDescriptor cd) throws IOException { return cd.getInstanceDir().resolve(cd.getDataDir()).toAbsolutePath().toString(); } - /** - * Optionally allow the DirectoryFactory to request registration of some MBeans. 
- */ - public Collection offerMBeans() { - return Collections.emptySet(); - } - public void cleanupOldIndexDirectories(final String dataDirPath, final String currentIndexDirPath, boolean afterCoreReload) { File dataDir = new File(dataDirPath); if (!dataDir.isDirectory()) { diff --git a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java index db953d38b1ef..260a991d1ce0 100644 --- a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java +++ b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java @@ -22,7 +22,6 @@ import java.lang.invoke.MethodHandles; import java.net.URLEncoder; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -51,6 +50,8 @@ import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.IOUtils; import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.store.blockcache.BlockCache; import org.apache.solr.store.blockcache.BlockDirectory; import org.apache.solr.store.blockcache.BlockDirectoryCache; @@ -70,7 +71,7 @@ import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; -public class HdfsDirectoryFactory extends CachingDirectoryFactory implements SolrCoreAware { +public class HdfsDirectoryFactory extends CachingDirectoryFactory implements SolrCoreAware, SolrMetricProducer { private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); public static final String BLOCKCACHE_SLAB_COUNT = "solr.hdfs.blockcache.slab.count"; @@ -493,13 +494,14 @@ private void initKerberos() { } @Override - public Collection offerMBeans() { - return Arrays.asList(MetricsHolder.metrics, LocalityHolder.reporter); + public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { + MetricsHolder.metrics.initializeMetrics(manager, registry, scope); + LocalityHolder.reporter.initializeMetrics(manager, registry, scope); } @Override public void inform(SolrCore core) { - setHost(core.getCoreDescriptor().getCoreContainer().getHostName()); + setHost(core.getCoreContainer().getHostName()); } @VisibleForTesting diff --git a/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java b/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java deleted file mode 100644 index 4fb0dcd4d63e..000000000000 --- a/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java +++ /dev/null @@ -1,478 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
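With DirectoryFactory.offerMBeans() gone (and JmxMonitoredMap removed below in favor of the metrics registry), a custom factory that wants to expose statistics would now implement SolrMetricProducer, as HdfsDirectoryFactory does above. A rough sketch under that assumption; the class name, gauge, and metric name are made up for illustration:

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.solr.core.StandardDirectoryFactory;
    import org.apache.solr.metrics.SolrMetricManager;
    import org.apache.solr.metrics.SolrMetricProducer;

    public class MyDirectoryFactory extends StandardDirectoryFactory implements SolrMetricProducer {
      private final AtomicLong opens = new AtomicLong(); // incremented elsewhere in the factory (omitted)

      @Override
      public void initializeMetrics(SolrMetricManager manager, String registry, String scope) {
        // Mirrors the registerGauge(null, registry, ...) calls used elsewhere in this patch.
        manager.registerGauge(null, registry, () -> opens.get(), true,
            "directoryOpens", getClass().getSimpleName(), scope);
      }
    }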
- */ -package org.apache.solr.core; - -import javax.management.Attribute; -import javax.management.AttributeList; -import javax.management.AttributeNotFoundException; -import javax.management.DynamicMBean; -import javax.management.InstanceNotFoundException; -import javax.management.InvalidAttributeValueException; -import javax.management.MBeanAttributeInfo; -import javax.management.MBeanException; -import javax.management.MBeanInfo; -import javax.management.MBeanServer; -import javax.management.MBeanServerFactory; -import javax.management.MalformedObjectNameException; -import javax.management.ObjectName; -import javax.management.Query; -import javax.management.QueryExp; -import javax.management.ReflectionException; -import javax.management.openmbean.OpenMBeanAttributeInfoSupport; -import javax.management.openmbean.OpenType; -import javax.management.openmbean.SimpleType; -import javax.management.remote.JMXConnectorServer; -import javax.management.remote.JMXConnectorServerFactory; -import javax.management.remote.JMXServiceURL; -import java.lang.invoke.MethodHandles; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.Hashtable; -import java.util.List; -import java.util.Locale; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.solr.common.SolrException; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.core.SolrConfig.JmxConfiguration; -import org.apache.solr.metrics.reporters.JmxObjectNameFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.solr.common.params.CommonParams.ID; -import static org.apache.solr.common.params.CommonParams.NAME; - -/** - *
    - * Responsible for finding (or creating) a MBeanServer from given configuration - * and registering all SolrInfoMBean objects with JMX. - *
    - *
    - * Please see http://wiki.apache.org/solr/SolrJmx for instructions on usage and configuration - *
    - * - * - * @see org.apache.solr.core.SolrConfig.JmxConfiguration - * @since solr 1.3 - */ -public class JmxMonitoredMap extends - ConcurrentHashMap { - private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - - private static final String REPORTER_NAME = "_jmx_"; - - // set to true to use cached statistics NamedLists between getMBeanInfo calls to work - // around over calling getStatistics on MBeanInfos when iterating over all attributes (SOLR-6586) - private final boolean useCachedStatsBetweenGetMBeanInfoCalls = Boolean.getBoolean("useCachedStatsBetweenGetMBeanInfoCalls"); - - private final MBeanServer server; - - private final String jmxRootName; - - private final String coreHashCode; - - private final JmxObjectNameFactory nameFactory; - - private final String registryName; - - public JmxMonitoredMap(String coreName, String coreHashCode, String registryName, - final JmxConfiguration jmxConfig) { - this.coreHashCode = coreHashCode; - this.registryName = registryName; - jmxRootName = (null != jmxConfig.rootName ? - jmxConfig.rootName - : ("solr" + (null != coreName ? "/" + coreName : ""))); - - if (jmxConfig.serviceUrl == null) { - List servers = null; - - if (jmxConfig.agentId == null) { - // Try to find the first MBeanServer - servers = MBeanServerFactory.findMBeanServer(null); - } else if (jmxConfig.agentId != null) { - // Try to find the first MBean server with the given agentId - servers = MBeanServerFactory.findMBeanServer(jmxConfig.agentId); - // throw Exception if no servers were found with the given agentId - if (servers == null || servers.isEmpty()) - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, - "No JMX Servers found with agentId: " + jmxConfig.agentId); - } - - if (servers == null || servers.isEmpty()) { - server = null; - nameFactory = null; - log.debug("No JMX servers found, not exposing Solr information with JMX."); - return; - } - server = servers.get(0); - log.info("JMX monitoring is enabled. 
Adding Solr mbeans to JMX Server: " - + server); - } else { - MBeanServer newServer = null; - try { - // Create a new MBeanServer with the given serviceUrl - newServer = MBeanServerFactory.newMBeanServer(); - JMXConnectorServer connector = JMXConnectorServerFactory - .newJMXConnectorServer(new JMXServiceURL(jmxConfig.serviceUrl), - null, newServer); - connector.start(); - log.info("JMX monitoring is enabled at " + jmxConfig.serviceUrl); - } catch (Exception e) { - // Release the reference - throw new RuntimeException("Could not start JMX monitoring ", e); - } - server = newServer; - } - nameFactory = new JmxObjectNameFactory(REPORTER_NAME + coreHashCode, registryName); - } - - /** - * Clears the map and unregisters all SolrInfoMBeans in the map from - * MBeanServer - */ - @Override - public void clear() { - if (server != null) { - QueryExp exp = Query.or(Query.eq(Query.attr("coreHashCode"), Query.value(coreHashCode)), - Query.eq(Query.attr("reporter"), Query.value(REPORTER_NAME + coreHashCode))); - - Set objectNames = null; - try { - objectNames = server.queryNames(null, exp); - } catch (Exception e) { - log.warn("Exception querying for mbeans", e); - } - - if (objectNames != null) { - for (ObjectName name : objectNames) { - try { - server.unregisterMBean(name); - } catch (InstanceNotFoundException ie) { - // ignore - someone else already deleted this one - } catch (Exception e) { - log.warn("Exception un-registering mbean {}", name, e); - } - } - } - } - - super.clear(); - } - - /** - * Adds the SolrInfoMBean to the map and registers the given SolrInfoMBean - * instance with the MBeanServer defined for this core. If a SolrInfoMBean is - * already registered with the MBeanServer then it is unregistered and then - * re-registered. - * - * @param key the JMX type name for this SolrInfoMBean - * @param infoBean the SolrInfoMBean instance to be registered - */ - @Override - public SolrInfoMBean put(String key, SolrInfoMBean infoBean) { - if (server != null && infoBean != null) { - try { - // back-compat name - ObjectName name = getObjectName(key, infoBean); - if (server.isRegistered(name)) - server.unregisterMBean(name); - SolrDynamicMBean mbean = new SolrDynamicMBean(coreHashCode, infoBean, useCachedStatsBetweenGetMBeanInfoCalls); - server.registerMBean(mbean, name); - // now register it also under new name - String beanName = createBeanName(infoBean, key); - name = nameFactory.createName(null, registryName, beanName); - if (server.isRegistered(name)) - server.unregisterMBean(name); - server.registerMBean(mbean, name); - } catch (Exception e) { - log.warn( "Failed to register info bean: key=" + key + ", infoBean=" + infoBean, e); - } - } - - return super.put(key, infoBean); - } - - private String createBeanName(SolrInfoMBean infoBean, String key) { - if (infoBean.getCategory() == null) { - throw new IllegalArgumentException("SolrInfoMBean.category must never be null: " + infoBean); - } - StringBuilder sb = new StringBuilder(); - sb.append(infoBean.getCategory().toString()); - sb.append('.'); - sb.append(key); - sb.append('.'); - sb.append(infoBean.getName()); - return sb.toString(); - } - - /** - * Removes the SolrInfoMBean object at the given key and unregisters it from - * MBeanServer - * - * @param key the JMX type name for this SolrInfoMBean - */ - @Override - public SolrInfoMBean remove(Object key) { - SolrInfoMBean infoBean = get(key); - if (infoBean != null) { - try { - unregister((String) key, infoBean); - } catch (RuntimeException e) { - log.warn( "Failed to unregister info bean: " + 
key, e); - } - } - return super.remove(key); - } - - private void unregister(String key, SolrInfoMBean infoBean) { - if (server == null) - return; - - try { - // remove legacy name - ObjectName name = getObjectName(key, infoBean); - if (server.isRegistered(name) && coreHashCode.equals(server.getAttribute(name, "coreHashCode"))) { - server.unregisterMBean(name); - } - // remove new name - String beanName = createBeanName(infoBean, key); - name = nameFactory.createName(null, registryName, beanName); - if (server.isRegistered(name)) { - server.unregisterMBean(name); - } - } catch (Exception e) { - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, - "Failed to unregister info bean: " + key, e); - } - } - - private ObjectName getObjectName(String key, SolrInfoMBean infoBean) - throws MalformedObjectNameException { - Hashtable map = new Hashtable<>(); - map.put("type", key); - if (infoBean.getName() != null && !"".equals(infoBean.getName())) { - map.put(ID, infoBean.getName()); - } - return ObjectName.getInstance(jmxRootName, map); - } - - /** For test verification */ - public MBeanServer getServer() { - return server; - } - - /** - * DynamicMBean is used to dynamically expose all SolrInfoMBean - * getStatistics() NameList keys as String getters. - */ - static class SolrDynamicMBean implements DynamicMBean { - private SolrInfoMBean infoBean; - - private HashSet staticStats; - - private String coreHashCode; - - private volatile NamedList cachedDynamicStats; - - private boolean useCachedStatsBetweenGetMBeanInfoCalls; - - public SolrDynamicMBean(String coreHashCode, SolrInfoMBean managedResource) { - this(coreHashCode, managedResource, false); - } - - public SolrDynamicMBean(String coreHashCode, SolrInfoMBean managedResource, boolean useCachedStatsBetweenGetMBeanInfoCalls) { - this.useCachedStatsBetweenGetMBeanInfoCalls = useCachedStatsBetweenGetMBeanInfoCalls; - if (managedResource instanceof JmxAugmentedSolrInfoMBean) { - final JmxAugmentedSolrInfoMBean jmxSpecific = (JmxAugmentedSolrInfoMBean)managedResource; - this.infoBean = new SolrInfoMBeanWrapper(jmxSpecific) { - @Override - public NamedList getStatistics() { return jmxSpecific.getStatisticsForJmx(); } - }; - } else { - this.infoBean = managedResource; - } - staticStats = new HashSet<>(); - - // For which getters are already available in SolrInfoMBean - staticStats.add(NAME); - staticStats.add("version"); - staticStats.add("description"); - staticStats.add("category"); - staticStats.add("source"); - this.coreHashCode = coreHashCode; - } - - @Override - public MBeanInfo getMBeanInfo() { - ArrayList attrInfoList = new ArrayList<>(); - - for (String stat : staticStats) { - attrInfoList.add(new MBeanAttributeInfo(stat, String.class.getName(), - null, true, false, false)); - } - - // add core's hashcode - attrInfoList.add(new MBeanAttributeInfo("coreHashCode", String.class.getName(), - null, true, false, false)); - - try { - NamedList dynamicStats = infoBean.getStatistics(); - - if (useCachedStatsBetweenGetMBeanInfoCalls) { - cachedDynamicStats = dynamicStats; - } - - if (dynamicStats != null) { - for (int i = 0; i < dynamicStats.size(); i++) { - String name = dynamicStats.getName(i); - if (staticStats.contains(name)) { - continue; - } - Class type = dynamicStats.get(name).getClass(); - OpenType typeBox = determineType(type); - if (type.equals(String.class) || typeBox == null) { - attrInfoList.add(new MBeanAttributeInfo(dynamicStats.getName(i), - String.class.getName(), null, true, false, false)); - } else { - attrInfoList.add(new 
OpenMBeanAttributeInfoSupport( - dynamicStats.getName(i), dynamicStats.getName(i), typeBox, - true, false, false)); - } - } - } - } catch (Exception e) { - // don't log issue if the core is closing - if (!(SolrException.getRootCause(e) instanceof AlreadyClosedException)) - log.warn("Could not getStatistics on info bean {}", infoBean.getName(), e); - } - - MBeanAttributeInfo[] attrInfoArr = attrInfoList - .toArray(new MBeanAttributeInfo[attrInfoList.size()]); - return new MBeanInfo(getClass().getName(), infoBean - .getDescription(), attrInfoArr, null, null, null); - } - - private OpenType determineType(Class type) { - try { - for (Field field : SimpleType.class.getFields()) { - if (field.getType().equals(SimpleType.class)) { - SimpleType candidate = (SimpleType) field.get(SimpleType.class); - if (candidate.getTypeName().equals(type.getName())) { - return candidate; - } - } - } - } catch (Exception e) { - throw new RuntimeException(e); - } - return null; - } - - @Override - public Object getAttribute(String attribute) - throws AttributeNotFoundException, MBeanException, ReflectionException { - Object val; - if ("coreHashCode".equals(attribute)) { - val = coreHashCode; - } else if (staticStats.contains(attribute) && attribute != null - && attribute.length() > 0) { - try { - String getter = "get" + attribute.substring(0, 1).toUpperCase(Locale.ROOT) - + attribute.substring(1); - Method meth = infoBean.getClass().getMethod(getter); - val = meth.invoke(infoBean); - } catch (Exception e) { - throw new AttributeNotFoundException(attribute); - } - } else { - NamedList stats = null; - if (useCachedStatsBetweenGetMBeanInfoCalls) { - NamedList cachedStats = this.cachedDynamicStats; - if (cachedStats != null) { - stats = cachedStats; - } - } - if (stats == null) { - stats = infoBean.getStatistics(); - } - val = stats.get(attribute); - } - - if (val != null) { - // It's String or one of the simple types, just return it as JMX suggests direct support for such types - for (String simpleTypeName : SimpleType.ALLOWED_CLASSNAMES_LIST) { - if (val.getClass().getName().equals(simpleTypeName)) { - return val; - } - } - // It's an arbitrary object which could be something complex and odd, return its toString, assuming that is - // a workable representation of the object - return val.toString(); - } - return null; - } - - @Override - public AttributeList getAttributes(String[] attributes) { - AttributeList list = new AttributeList(); - for (String attribute : attributes) { - try { - list.add(new Attribute(attribute, getAttribute(attribute))); - } catch (Exception e) { - log.warn("Could not get attribute " + attribute); - } - } - - return list; - } - - @Override - public void setAttribute(Attribute attribute) - throws AttributeNotFoundException, InvalidAttributeValueException, - MBeanException, ReflectionException { - throw new UnsupportedOperationException("Operation not Supported"); - } - - @Override - public AttributeList setAttributes(AttributeList attributes) { - throw new UnsupportedOperationException("Operation not Supported"); - } - - @Override - public Object invoke(String actionName, Object[] params, String[] signature) - throws MBeanException, ReflectionException { - throw new UnsupportedOperationException("Operation not Supported"); - } - } - - /** - * SolrInfoMBean that provides JMX-specific statistics. 
Used, for example, - * if generating full statistics is expensive; the expensive statistics can - * be generated normally for use with the web ui, while an abbreviated version - * are generated for period jmx use. - */ - public interface JmxAugmentedSolrInfoMBean extends SolrInfoMBean { - /** - * JMX-specific statistics - */ - public NamedList getStatisticsForJmx(); - } -} diff --git a/solr/core/src/java/org/apache/solr/core/NodeConfig.java b/solr/core/src/java/org/apache/solr/core/NodeConfig.java index de2dcead05fd..7a209d0d777f 100644 --- a/solr/core/src/java/org/apache/solr/core/NodeConfig.java +++ b/solr/core/src/java/org/apache/solr/core/NodeConfig.java @@ -17,7 +17,10 @@ package org.apache.solr.core; import java.nio.file.Path; +import java.util.Arrays; +import java.util.HashSet; import java.util.Properties; +import java.util.Set; import org.apache.solr.common.SolrException; import org.apache.solr.logging.LogWatcherConfig; @@ -64,6 +67,8 @@ public class NodeConfig { private final PluginInfo[] metricReporterPlugins; + private final Set hiddenSysProps; + private final PluginInfo transientCacheConfig; private NodeConfig(String nodeName, Path coreRootDirectory, Path configSetBaseDirectory, String sharedLibDirectory, @@ -73,7 +78,7 @@ private NodeConfig(String nodeName, Path coreRootDirectory, Path configSetBaseDi LogWatcherConfig logWatcherConfig, CloudConfig cloudConfig, Integer coreLoadThreads, int transientCacheSize, boolean useSchemaCache, String managementPath, SolrResourceLoader loader, Properties solrProperties, PluginInfo[] backupRepositoryPlugins, - PluginInfo[] metricReporterPlugins, PluginInfo transientCacheConfig) { + PluginInfo[] metricReporterPlugins, Set hiddenSysProps, PluginInfo transientCacheConfig) { this.nodeName = nodeName; this.coreRootDirectory = coreRootDirectory; this.configSetBaseDirectory = configSetBaseDirectory; @@ -94,6 +99,7 @@ private NodeConfig(String nodeName, Path coreRootDirectory, Path configSetBaseDi this.solrProperties = solrProperties; this.backupRepositoryPlugins = backupRepositoryPlugins; this.metricReporterPlugins = metricReporterPlugins; + this.hiddenSysProps = hiddenSysProps; this.transientCacheConfig = transientCacheConfig; if (this.cloudConfig != null && this.getCoreLoadThreadCount(false) < 2) { @@ -187,6 +193,10 @@ public PluginInfo[] getMetricReporterPlugins() { return metricReporterPlugins; } + public Set getHiddenSysProps() { + return hiddenSysProps; + } + public PluginInfo getTransientCachePluginInfo() { return transientCacheConfig; } public static class NodeConfigBuilder { @@ -211,6 +221,7 @@ public static class NodeConfigBuilder { private Properties solrProperties = new Properties(); private PluginInfo[] backupRepositoryPlugins; private PluginInfo[] metricReporterPlugins; + private Set hiddenSysProps = new HashSet<>(DEFAULT_HIDDEN_SYS_PROPS); private PluginInfo transientCacheConfig; private final SolrResourceLoader loader; @@ -227,6 +238,14 @@ public static class NodeConfigBuilder { private static final String DEFAULT_COLLECTIONSHANDLERCLASS = "org.apache.solr.handler.admin.CollectionsHandler"; private static final String DEFAULT_CONFIGSETSHANDLERCLASS = "org.apache.solr.handler.admin.ConfigSetsHandler"; + public static final Set DEFAULT_HIDDEN_SYS_PROPS = new HashSet<>(Arrays.asList( + "javax.net.ssl.keyStorePassword", + "javax.net.ssl.trustStorePassword", + "basicauth", + "zkDigestPassword", + "zkDigestReadonlyPassword" + )); + public NodeConfigBuilder(String nodeName, SolrResourceLoader loader) { this.nodeName = nodeName; 
this.loader = loader; @@ -331,11 +350,16 @@ public NodeConfigBuilder setSolrCoreCacheFactoryConfig(PluginInfo transientCache return this; } + public NodeConfigBuilder setHiddenSysProps(Set hiddenSysProps) { + this.hiddenSysProps = hiddenSysProps; + return this; + } + public NodeConfig build() { return new NodeConfig(nodeName, coreRootDirectory, configSetBaseDirectory, sharedLibDirectory, shardHandlerFactoryConfig, updateShardHandlerConfig, coreAdminHandlerClass, collectionsAdminHandlerClass, infoHandlerClass, configSetsHandlerClass, logWatcherConfig, cloudConfig, coreLoadThreads, transientCacheSize, useSchemaCache, managementPath, loader, solrProperties, - backupRepositoryPlugins, metricReporterPlugins, transientCacheConfig); + backupRepositoryPlugins, metricReporterPlugins, hiddenSysProps, transientCacheConfig); } } } diff --git a/solr/core/src/java/org/apache/solr/core/PluginBag.java b/solr/core/src/java/org/apache/solr/core/PluginBag.java index 65978f33d532..4c0858e369fe 100644 --- a/solr/core/src/java/org/apache/solr/core/PluginBag.java +++ b/solr/core/src/java/org/apache/solr/core/PluginBag.java @@ -294,8 +294,8 @@ public boolean isLoaded(String name) { private void registerMBean(Object inst, SolrCore core, String pluginKey) { if (core == null) return; - if (inst instanceof SolrInfoMBean) { - SolrInfoMBean mBean = (SolrInfoMBean) inst; + if (inst instanceof SolrInfoBean) { + SolrInfoBean mBean = (SolrInfoBean) inst; String name = (inst instanceof SolrRequestHandler) ? pluginKey : mBean.getName(); core.registerInfoBean(name, mBean); } @@ -455,7 +455,7 @@ public void init(PluginInfo info) { } public RuntimeLib(SolrCore core) { - coreContainer = core.getCoreDescriptor().getCoreContainer(); + coreContainer = core.getCoreContainer(); } diff --git a/solr/core/src/java/org/apache/solr/core/SolrConfig.java b/solr/core/src/java/org/apache/solr/core/SolrConfig.java index a24442037725..4e7ab4877044 100644 --- a/solr/core/src/java/org/apache/solr/core/SolrConfig.java +++ b/solr/core/src/java/org/apache/solr/core/SolrConfig.java @@ -276,18 +276,12 @@ public SolrConfig(SolrResourceLoader loader, String name, InputSource is) hashSetInverseLoadFactor = 1.0f / getFloat("//HashDocSet/@loadFactor", 0.75f); hashDocSetMaxSize = getInt("//HashDocSet/@maxSize", 3000); - httpCachingConfig = new HttpCachingConfig(this); + if (get("jmx", null) != null) { + log.warn("solrconfig.xml: is no longer supported, use solr.xml:/metrics/reporter section instead"); + } - Node jmx = getNode("jmx", false); - if (jmx != null) { - jmxConfig = new JmxConfiguration(true, - get("jmx/@agentId", null), - get("jmx/@serviceUrl", null), - get("jmx/@rootName", null)); + httpCachingConfig = new HttpCachingConfig(this); - } else { - jmxConfig = new JmxConfiguration(false, null, null, null); - } maxWarmingSearchers = getInt("query/maxWarmingSearchers", 1); slowQueryThresholdMillis = getInt("query/slowQueryThresholdMillis", -1); for (SolrPluginInfo plugin : plugins) loadPluginInfo(plugin); @@ -510,48 +504,12 @@ public SolrRequestParsers getRequestParsers() { protected String dataDir; public final int slowQueryThresholdMillis; // threshold above which a query is considered slow - //JMX configuration - public final JmxConfiguration jmxConfig; - private final HttpCachingConfig httpCachingConfig; public HttpCachingConfig getHttpCachingConfig() { return httpCachingConfig; } - public static class JmxConfiguration implements MapSerializable { - public boolean enabled = false; - public String agentId; - public String serviceUrl; - public String 
rootName; - - public JmxConfiguration(boolean enabled, - String agentId, - String serviceUrl, - String rootName) { - this.enabled = enabled; - this.agentId = agentId; - this.serviceUrl = serviceUrl; - this.rootName = rootName; - - if (agentId != null && serviceUrl != null) { - throw new SolrException - (SolrException.ErrorCode.SERVER_ERROR, - "Incorrect JMX Configuration in solrconfig.xml, " + - "both agentId and serviceUrl cannot be specified at the same time"); - } - - } - - @Override - public Map toMap(Map map) { - map.put("agentId", agentId); - map.put("serviceUrl", serviceUrl); - map.put("rootName", rootName); - return map; - } - } - public static class HttpCachingConfig implements MapSerializable { /** @@ -858,7 +816,6 @@ public Map toMap(Map result) { m.put("queryResultMaxDocsCached", queryResultMaxDocsCached); m.put("enableLazyFieldLoading", enableLazyFieldLoading); m.put("maxBooleanClauses", booleanQueryMaxClauseCount); - if (jmxConfig != null) result.put("jmx", jmxConfig); for (SolrPluginInfo plugin : plugins) { List infos = getPluginInfos(plugin.clazz.getName()); if (infos == null || infos.isEmpty()) continue; @@ -884,7 +841,6 @@ public Map toMap(Map result) { addCacheConfig(m, filterCacheConfig, queryResultCacheConfig, documentCacheConfig, fieldValueCacheConfig); - if (jmxConfig != null) result.put("jmx", jmxConfig); m = new LinkedHashMap(); result.put("requestDispatcher", m); m.put("handleSelect", handleSelect); diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java index a6ba2dca8b41..d3a1df114dc0 100644 --- a/solr/core/src/java/org/apache/solr/core/SolrCore.java +++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java @@ -27,7 +27,6 @@ import java.io.Writer; import java.lang.invoke.MethodHandles; import java.lang.reflect.Constructor; -import java.net.URL; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; import java.nio.file.Path; @@ -58,6 +57,7 @@ import java.util.concurrent.locks.ReentrantLock; import com.codahale.metrics.Counter; +import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import com.google.common.collect.MapMaker; import org.apache.commons.io.FileUtils; @@ -133,7 +133,7 @@ import org.apache.solr.schema.ManagedIndexSchema; import org.apache.solr.schema.SimilarityFactory; import org.apache.solr.search.QParserPlugin; -import org.apache.solr.search.SolrFieldCacheMBean; +import org.apache.solr.search.SolrFieldCacheBean; import org.apache.solr.search.SolrIndexSearcher; import org.apache.solr.search.ValueSourceParser; import org.apache.solr.search.stats.LocalStatsCache; @@ -171,7 +171,7 @@ /** * */ -public final class SolrCore implements SolrInfoMBean, SolrMetricProducer, Closeable { +public final class SolrCore implements SolrInfoBean, SolrMetricProducer, Closeable { public static final String version="1.0"; @@ -180,7 +180,6 @@ public final class SolrCore implements SolrInfoMBean, SolrMetricProducer, Closea private String name; private String logid; // used to show what name is set - private CoreDescriptor coreDescriptor; private boolean isReloaded = false; @@ -202,7 +201,7 @@ public final class SolrCore implements SolrInfoMBean, SolrMetricProducer, Closea private final PluginBag updateProcessors = new PluginBag<>(UpdateRequestProcessorFactory.class, this, true); private final Map updateProcessorChains; private final SolrCoreMetricManager coreMetricManager; - private final Map infoRegistry; + private final Map infoRegistry = new 
ConcurrentHashMap<>(); private final IndexDeletionPolicyWrapper solrDelPolicy; private final SolrSnapshotMetaDataManager snapshotMgr; private final DirectoryFactory directoryFactory; @@ -221,6 +220,14 @@ public final class SolrCore implements SolrInfoMBean, SolrMetricProducer, Closea private Counter newSearcherCounter; private Counter newSearcherMaxReachedCounter; private Counter newSearcherOtherErrorsCounter; + private final CoreContainer coreContainer; + + private Set metricNames = new HashSet<>(); + + public Set getMetricNames() { + return metricNames; + } + public Date getStartTimeStamp() { return startTime; } @@ -424,10 +431,8 @@ public String getName() { } public void setName(String v) { - String oldName = this.name; this.name = v; this.logid = (v==null)?"":("["+v+"] "); - this.coreDescriptor = new CoreDescriptor(v, this.coreDescriptor); if (coreMetricManager != null) { coreMetricManager.afterCoreSetName(); } @@ -448,14 +453,14 @@ public SolrCoreMetricManager getCoreMetricManager() { } /** - * Returns a Map of name vs SolrInfoMBean objects. The returned map is an instance of + * Returns a Map of name vs SolrInfoBean objects. The returned map is an instance of * a ConcurrentHashMap and therefore no synchronization is needed for putting, removing * or iterating over it. * - * @return the Info Registry map which contains SolrInfoMBean objects keyed by name + * @return the Info Registry map which contains SolrInfoBean objects keyed by name * @since solr 1.3 */ - public Map getInfoRegistry() { + public Map getInfoRegistry() { return infoRegistry; } @@ -636,9 +641,9 @@ public SolrCore reload(ConfigSet coreConfig) throws IOException { boolean success = false; SolrCore core = null; try { - CoreDescriptor cd = new CoreDescriptor(coreDescriptor.getName(), coreDescriptor); + CoreDescriptor cd = new CoreDescriptor(name, getCoreDescriptor()); cd.loadExtraProperties(); //Reload the extra properties - core = new SolrCore(getName(), getDataDir(), coreConfig.getSolrConfig(), + core = new SolrCore(coreContainer, getName(), getDataDir(), coreConfig.getSolrConfig(), coreConfig.getIndexSchema(), coreConfig.getProperties(), cd, updateHandler, solrDelPolicy, currentCore, true); @@ -658,7 +663,7 @@ public SolrCore reload(ConfigSet coreConfig) throws IOException { } private DirectoryFactory initDirectoryFactory() { - return DirectoryFactory.loadDirectoryFactory(solrConfig, getCoreDescriptor().getCoreContainer(), coreMetricManager.getRegistryName()); + return DirectoryFactory.loadDirectoryFactory(solrConfig, coreContainer, coreMetricManager.getRegistryName()); } private RecoveryStrategy.Builder initRecoveryStrategyBuilder() { @@ -845,12 +850,16 @@ private UpdateHandler createUpdateHandler(String className, UpdateHandler update return createReloadedUpdateHandler(className, "Update Handler", updateHandler); } - public SolrCore(CoreDescriptor cd, ConfigSet coreConfig) { - this(cd.getName(), null, coreConfig.getSolrConfig(), coreConfig.getIndexSchema(), coreConfig.getProperties(), + public SolrCore(CoreContainer coreContainer, CoreDescriptor cd, ConfigSet coreConfig) { + this(coreContainer, cd.getName(), null, coreConfig.getSolrConfig(), coreConfig.getIndexSchema(), coreConfig.getProperties(), cd, null, null, null, false); } - + public CoreContainer getCoreContainer() { + return coreContainer; + } + + /** * Creates a new core and register it in the list of cores. If a core with the * same name already exists, it will be stopped and replaced by this one. 
@@ -864,14 +873,18 @@ public SolrCore(CoreDescriptor cd, ConfigSet coreConfig) { * * @since solr 1.3 */ - public SolrCore(String name, String dataDir, SolrConfig config, - IndexSchema schema, NamedList configSetProperties, - CoreDescriptor coreDescriptor, UpdateHandler updateHandler, - IndexDeletionPolicyWrapper delPolicy, SolrCore prev, boolean reload) { + public SolrCore(CoreContainer coreContainer, String name, String dataDir, SolrConfig config, + IndexSchema schema, NamedList configSetProperties, + CoreDescriptor coreDescriptor, UpdateHandler updateHandler, + IndexDeletionPolicyWrapper delPolicy, SolrCore prev, boolean reload) { + + this.coreContainer = coreContainer; assert ObjectReleaseTracker.track(searcherExecutor); // ensure that in unclean shutdown tests we still close this - - this.coreDescriptor = Objects.requireNonNull(coreDescriptor, "coreDescriptor cannot be null"); + + CoreDescriptor cd = Objects.requireNonNull(coreDescriptor, "coreDescriptor cannot be null"); + coreContainer.solrCores.addCoreDescriptor(cd); + setName(name); MDCLoggingContext.setCore(this); @@ -900,14 +913,17 @@ public SolrCore(String name, String dataDir, SolrConfig config, checkVersionFieldExistsInSchema(schema, coreDescriptor); - SolrMetricManager metricManager = this.coreDescriptor.getCoreContainer().getMetricManager(); + SolrMetricManager metricManager = coreContainer.getMetricManager(); // initialize searcher-related metrics initializeMetrics(metricManager, coreMetricManager.getRegistryName(), null); - // Initialize JMX - this.infoRegistry = initInfoRegistry(name, config); - infoRegistry.put("fieldCache", new SolrFieldCacheMBean()); + SolrFieldCacheBean solrFieldCacheBean = new SolrFieldCacheBean(); + // this is registered at the CONTAINER level because it's not core-specific - for now we + // also register it here for back-compat + solrFieldCacheBean.initializeMetrics(metricManager, coreMetricManager.getRegistryName(), "core"); + infoRegistry.put("fieldCache", solrFieldCacheBean); + initSchema(config, schema); @@ -998,15 +1014,9 @@ public SolrCore(String name, String dataDir, SolrConfig config, // from the core. resourceLoader.inform(infoRegistry); - // Allow the directory factory to register MBeans as well - for (SolrInfoMBean bean : directoryFactory.offerMBeans()) { - log.debug("Registering JMX bean [{}] from directory factory.", bean.getName()); - // Not worried about concurrency, so no reason to use putIfAbsent - if (infoRegistry.containsKey(bean.getName())){ - log.debug("Ignoring JMX bean [{}] due to name conflict.", bean.getName()); - } else { - infoRegistry.put(bean.getName(), bean); - } + // Allow the directory factory to report metrics + if (directoryFactory instanceof SolrMetricProducer) { + ((SolrMetricProducer)directoryFactory).initializeMetrics(metricManager, coreMetricManager.getRegistryName(), "directoryFactory"); } // seed version buckets with max from index during core initialization ... requires a searcher! @@ -1040,15 +1050,15 @@ public void seedVersionBuckets() { /** Set UpdateLog to buffer updates if the slice is in construction. */ private void bufferUpdatesIfConstructing(CoreDescriptor coreDescriptor) { - final CoreContainer cc = coreDescriptor.getCoreContainer(); - if (cc != null && cc.isZooKeeperAware()) { + + if (coreContainer != null && coreContainer.isZooKeeperAware()) { if (reqHandlers.get("/get") == null) { log.warn("WARNING: RealTimeGetHandler is not registered at /get. 
" + "SolrCloud will always use full index replication instead of the more efficient PeerSync method."); } // ZK pre-register would have already happened so we read slice properties now - final ClusterState clusterState = cc.getZkController().getClusterState(); + final ClusterState clusterState = coreContainer.getZkController().getClusterState(); final DocCollection collection = clusterState.getCollection(coreDescriptor.getCloudDescriptor().getCollectionName()); final Slice slice = collection.getSlice(coreDescriptor.getCloudDescriptor().getShardId()); if (slice.getState() == Slice.State.CONSTRUCTION) { @@ -1126,34 +1136,45 @@ private SolrCoreMetricManager initCoreMetricManager(SolrConfig config) { @Override public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { - newSearcherCounter = manager.counter(registry, "new", Category.SEARCHER.toString()); - newSearcherTimer = manager.timer(registry, "time", Category.SEARCHER.toString(), "new"); - newSearcherWarmupTimer = manager.timer(registry, "warmup", Category.SEARCHER.toString(), "new"); - newSearcherMaxReachedCounter = manager.counter(registry, "maxReached", Category.SEARCHER.toString(), "new"); - newSearcherOtherErrorsCounter = manager.counter(registry, "errors", Category.SEARCHER.toString(), "new"); - - manager.registerGauge(registry, () -> name == null ? "(null)" : name, true, "coreName", Category.CORE.toString()); - manager.registerGauge(registry, () -> startTime, true, "startTime", Category.CORE.toString()); - manager.registerGauge(registry, () -> getOpenCount(), true, "refCount", Category.CORE.toString()); - manager.registerGauge(registry, () -> resourceLoader.getInstancePath().toString(), true, "instanceDir", Category.CORE.toString()); - manager.registerGauge(registry, () -> getIndexDir(), true, "indexDir", Category.CORE.toString()); - manager.registerGauge(registry, () -> getIndexSize(), true, "sizeInBytes", Category.INDEX.toString()); - manager.registerGauge(registry, () -> NumberUtils.readableSize(getIndexSize()), true, "size", Category.INDEX.toString()); - manager.registerGauge(registry, () -> coreDescriptor.getCoreContainer().getCoreNames(this), true, "aliases", Category.CORE.toString()); + newSearcherCounter = manager.counter(this, registry, "new", Category.SEARCHER.toString()); + newSearcherTimer = manager.timer(this, registry, "time", Category.SEARCHER.toString(), "new"); + newSearcherWarmupTimer = manager.timer(this, registry, "warmup", Category.SEARCHER.toString(), "new"); + newSearcherMaxReachedCounter = manager.counter(this, registry, "maxReached", Category.SEARCHER.toString(), "new"); + newSearcherOtherErrorsCounter = manager.counter(this, registry, "errors", Category.SEARCHER.toString(), "new"); + + manager.registerGauge(this, registry, () -> name == null ? 
"(null)" : name, true, "coreName", Category.CORE.toString()); + manager.registerGauge(this, registry, () -> startTime, true, "startTime", Category.CORE.toString()); + manager.registerGauge(this, registry, () -> getOpenCount(), true, "refCount", Category.CORE.toString()); + manager.registerGauge(this, registry, () -> resourceLoader.getInstancePath().toString(), true, "instanceDir", Category.CORE.toString()); + manager.registerGauge(this, registry, () -> getIndexDir(), true, "indexDir", Category.CORE.toString()); + manager.registerGauge(this, registry, () -> getIndexSize(), true, "sizeInBytes", Category.INDEX.toString()); + manager.registerGauge(this, registry, () -> NumberUtils.readableSize(getIndexSize()), true, "size", Category.INDEX.toString()); + if (coreContainer != null) { + manager.registerGauge(this, registry, () -> coreContainer.getCoreNames(this), true, "aliases", Category.CORE.toString()); + final CloudDescriptor cd = getCoreDescriptor().getCloudDescriptor(); + if (cd != null) { + manager.registerGauge(this, registry, () -> { + if (cd.getCollectionName() != null) { + return cd.getCollectionName(); + } else { + return "_notset_"; + } + }, true, "collection", Category.CORE.toString()); + + manager.registerGauge(this, registry, () -> { + if (cd.getShardId() != null) { + return cd.getShardId(); + } else { + return "_auto_"; + } + }, true, "shard", Category.CORE.toString()); + } + } // initialize disk total / free metrics Path dataDirPath = Paths.get(dataDir); File dataDirFile = dataDirPath.toFile(); - manager.registerGauge(registry, () -> dataDirFile.getTotalSpace(), true, "totalSpace", Category.CORE.toString(), "fs"); - manager.registerGauge(registry, () -> dataDirFile.getUsableSpace(), true, "usableSpace", Category.CORE.toString(), "fs"); - } - - private Map initInfoRegistry(String name, SolrConfig config) { - if (config.jmxConfig.enabled) { - return new JmxMonitoredMap(name, coreMetricManager.getRegistryName(), String.valueOf(this.hashCode()), config.jmxConfig); - } else { - log.debug("JMX monitoring not detected for core: " + name); - return new ConcurrentHashMap<>(); - } + manager.registerGauge(this, registry, () -> dataDirFile.getTotalSpace(), true, "totalSpace", Category.CORE.toString(), "fs"); + manager.registerGauge(this, registry, () -> dataDirFile.getUsableSpace(), true, "usableSpace", Category.CORE.toString(), "fs"); } private void checkVersionFieldExistsInSchema(IndexSchema schema, CoreDescriptor coreDescriptor) { @@ -2685,6 +2706,9 @@ public T initPlugins(List pluginInfos, Map registry, for (PluginInfo info : pluginInfos) { T o = createInitInstance(info,type, type.getSimpleName(), defClassName); registry.put(info.name, o); + if (o instanceof SolrMetricProducer) { + coreMetricManager.registerMetricProducer(type.getSimpleName() + "." 
+ info.name, (SolrMetricProducer)o); + } if(info.isDefault()){ def = o; } @@ -2692,6 +2716,12 @@ public T initPlugins(List pluginInfos, Map registry, return def; } + public void initDefaultPlugin(Object plugin, Class type) { + if (plugin instanceof SolrMetricProducer) { + coreMetricManager.registerMetricProducer(type.getSimpleName() + ".default", (SolrMetricProducer)plugin); + } + } + /**For a given List of PluginInfo return the instances as a List * @param defClassName The default classname if PluginInfo#className == null * @return The instances initialized @@ -2746,7 +2776,7 @@ protected RestManager initRestManager() throws SolrException { if (initArgs == null) initArgs = new NamedList<>(); - String collection = coreDescriptor.getCollectionName(); + String collection = getCoreDescriptor().getCollectionName(); StorageIO storageIO = ManagedResourceStorage.newStorageIO(collection, resourceLoader, initArgs); mgr.init(resourceLoader, initArgs, storageIO); @@ -2755,7 +2785,7 @@ protected RestManager initRestManager() throws SolrException { } public CoreDescriptor getCoreDescriptor() { - return coreDescriptor; + return coreContainer.getCoreDescriptor(name); } public IndexDeletionPolicyWrapper getDeletionPolicy(){ @@ -2775,14 +2805,9 @@ public ReentrantLock getRuleExpiryLock() { } ///////////////////////////////////////////////////////////////////// - // SolrInfoMBean stuff: Statistics and Module Info + // SolrInfoBean stuff: Statistics and Module Info ///////////////////////////////////////////////////////////////////// - @Override - public String getVersion() { - return SolrCore.version; - } - @Override public String getDescription() { return "SolrCore"; @@ -2794,55 +2819,15 @@ public Category getCategory() { } @Override - public String getSource() { - return null; - } - - @Override - public URL[] getDocs() { - return null; - } - - @Override - public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap<>(8); - lst.add("coreName", name==null ? 
"(null)" : name); - lst.add("startTime", startTime); - lst.add("refCount", getOpenCount()); - lst.add("instanceDir", resourceLoader.getInstancePath()); - lst.add("indexDir", getIndexDir()); - long size = getIndexSize(); - lst.add("sizeInBytes", size); - lst.add("size", NumberUtils.readableSize(size)); - - CoreDescriptor cd = getCoreDescriptor(); - if (cd != null) { - if (null != cd && cd.getCoreContainer() != null) { - lst.add("aliases", getCoreDescriptor().getCoreContainer().getCoreNames(this)); - } - CloudDescriptor cloudDesc = cd.getCloudDescriptor(); - if (cloudDesc != null) { - String collection = cloudDesc.getCollectionName(); - if (collection == null) { - collection = "_notset_"; - } - lst.add("collection", collection); - String shard = cloudDesc.getShardId(); - if (shard == null) { - shard = "_auto_"; - } - lst.add("shard", shard); - } - } - - return lst; + public MetricRegistry getMetricRegistry() { + return coreMetricManager.getRegistry(); } public Codec getCodec() { return codec; } - public void unloadOnClose(boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) { + public void unloadOnClose(final CoreDescriptor desc, boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) { if (deleteIndexDir) { try { directoryFactory.remove(getIndexDir()); @@ -2865,13 +2850,12 @@ public void preClose(SolrCore core) { @Override public void postClose(SolrCore core) { - CoreDescriptor cd = core.getCoreDescriptor(); - if (cd != null) { + if (desc != null) { try { - FileUtils.deleteDirectory(cd.getInstanceDir().toFile()); + FileUtils.deleteDirectory(desc.getInstanceDir().toFile()); } catch (IOException e) { SolrException.log(log, "Failed to delete instance dir for core:" - + core.getName() + " dir:" + cd.getInstanceDir()); + + core.getName() + " dir:" + desc.getInstanceDir()); } } } @@ -2930,7 +2914,7 @@ private void registerConfListener() { public static Runnable getConfListener(SolrCore core, ZkSolrResourceLoader zkSolrResourceLoader) { final String coreName = core.getName(); - final CoreContainer cc = core.getCoreDescriptor().getCoreContainer(); + final CoreContainer cc = core.getCoreContainer(); final String overlayPath = zkSolrResourceLoader.getConfigSetZkPath() + "/" + ConfigOverlay.RESOURCE_NAME; final String solrConfigPath = zkSolrResourceLoader.getConfigSetZkPath() + "/" + core.getSolrConfig().getName(); String schemaRes = null; @@ -2983,11 +2967,11 @@ public static Runnable getConfListener(SolrCore core, ZkSolrResourceLoader zkSol }; } - public void registerInfoBean(String name, SolrInfoMBean solrInfoMBean) { - infoRegistry.put(name, solrInfoMBean); + public void registerInfoBean(String name, SolrInfoBean solrInfoBean) { + infoRegistry.put(name, solrInfoBean); - if (solrInfoMBean instanceof SolrMetricProducer) { - SolrMetricProducer producer = (SolrMetricProducer) solrInfoMBean; + if (solrInfoBean instanceof SolrMetricProducer) { + SolrMetricProducer producer = (SolrMetricProducer) solrInfoBean; coreMetricManager.registerMetricProducer(name, producer); } } @@ -3064,7 +3048,6 @@ public BlobRepository.BlobContentRef loadDecodeAndCacheBlob(String key, BlobRepo if (!BlobRepository.BLOB_KEY_PATTERN_CHECKER.matcher(key).matches()) { throw new IllegalArgumentException("invalid key format, must end in /N where N is the version number"); } - CoreContainer coreContainer = getCoreDescriptor().getCoreContainer(); // define the blob BlobRepository.BlobContentRef blobRef = coreContainer.getBlobRepository().getBlobIncRef(key, decoder); addCloseHook(new CloseHook() { 
@@ -3074,7 +3057,7 @@ public void preClose(SolrCore core) { @Override public void postClose(SolrCore core) { - core.getCoreDescriptor().getCoreContainer().getBlobRepository().decrementBlobRefCount(blobRef); + coreContainer.getBlobRepository().decrementBlobRefCount(blobRef); } }); return blobRef; diff --git a/solr/core/src/java/org/apache/solr/core/SolrCores.java b/solr/core/src/java/org/apache/solr/core/SolrCores.java index 40d511558f6d..ef6fca569bf0 100644 --- a/solr/core/src/java/org/apache/solr/core/SolrCores.java +++ b/solr/core/src/java/org/apache/solr/core/SolrCores.java @@ -47,7 +47,8 @@ class SolrCores implements Observer { private static Object modifyLock = new Object(); // for locking around manipulating any of the core maps. private final Map cores = new LinkedHashMap<>(); // For "permanent" cores - private final Map lazyDescriptors = new LinkedHashMap<>(); + // These descriptors, once loaded, will _not_ be unloaded, i.e. they are not "transient". + private final Map residentDesciptors = new LinkedHashMap<>(); private final CoreContainer container; @@ -67,17 +68,26 @@ class SolrCores implements Observer { this.container = container; } - protected void putDynamicDescriptor(String rawName, CoreDescriptor cd) { + protected void addCoreDescriptor(CoreDescriptor p) { synchronized (modifyLock) { - if (cd.isTransient()) { + if (p.isTransient()) { + if (container.getTransientCacheHandler() != null) { + container.getTransientCacheHandler().addTransientDescriptor(p.getName(), p); + } + } else { + residentDesciptors.put(p.getName(), p); + } + } + } + + protected void removeCoreDescriptor(CoreDescriptor p) { + synchronized (modifyLock) { + if (p.isTransient()) { if (container.getTransientCacheHandler() != null) { - container.getTransientCacheHandler().addTransientDescriptor(rawName, cd); - } else { - log.error("Tried to add transient core to transient handler, but no transient core handler has been found. " - + " Descriptor: " + cd.toString()); + container.getTransientCacheHandler().removeTransientDescriptor(p.getName()); } } else { - lazyDescriptors.put(rawName, cd); + residentDesciptors.remove(p.getName()); } } } @@ -149,10 +159,18 @@ protected SolrCore putTransientCore(NodeConfig cfg, String name, SolrCore core, return retCore; } - protected SolrCore putCore(String name, SolrCore core) { + // Returns the old core if there was a core of the same name. + protected SolrCore putCore(CoreDescriptor cd, SolrCore core) { synchronized (modifyLock) { - return cores.put(name, core); + if (cd.isTransient()) { + if (container.getTransientCacheHandler() != null) { + return container.getTransientCacheHandler().addCore(cd.getName(), core); + } + } else { + return cores.put(cd.getName(), core); + } } + return null; } /** @@ -231,7 +249,7 @@ public Collection getAllCoreNames() { if (container.getTransientCacheHandler() != null) { set.addAll(container.getTransientCacheHandler().getAllCoreNames()); } - set.addAll(lazyDescriptors.keySet()); + set.addAll(residentDesciptors.keySet()); } return set; } @@ -260,13 +278,19 @@ protected void swap(String n0, String n1) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n1); } } + // When we swap the cores, we also need to swap the associated core descriptors. 
Note, this changes the + // name of the coreDescriptor by virtue of the c-tor + CoreDescriptor cd1 = c1.getCoreDescriptor(); + addCoreDescriptor(new CoreDescriptor(n1, c0.getCoreDescriptor())); + addCoreDescriptor(new CoreDescriptor(n0, cd1)); cores.put(n0, c1); cores.put(n1, c0); + c0.setName(n1); + c1.setName(n0); + container.getMetricManager().swapRegistries( c0.getCoreMetricManager().getRegistryName(), c1.getCoreMetricManager().getRegistryName()); - c0.setName(n1); - c1.setName(n0); } } @@ -277,12 +301,10 @@ protected SolrCore remove(String name) { SolrCore ret = cores.remove(name); // It could have been a newly-created core. It could have been a transient core. The newly-created cores // in particular should be checked. It could have been a dynamic core. - TransientSolrCoreCache transientHandler = container.getTransientCacheHandler(); + TransientSolrCoreCache transientHandler = container.getTransientCacheHandler(); if (ret == null && transientHandler != null) { ret = transientHandler.removeCore(name); - transientHandler.removeTransientDescriptor(name); } - lazyDescriptors.remove(name); return ret; } } @@ -304,14 +326,6 @@ SolrCore getCoreFromAnyList(String name, boolean incRefCount) { } } - protected CoreDescriptor getDynamicDescriptor(String name) { - synchronized (modifyLock) { - CoreDescriptor cd = lazyDescriptors.get(name); - if (cd != null || container.getTransientCacheHandler() == null) return cd; - return container.getTransientCacheHandler().getTransientDescriptor(name); - } - } - // See SOLR-5366 for why the UNLOAD command needs to know whether a core is actually loaded or not, it might have // to close the core. However, there's a race condition. If the core happens to be in the pending "to close" queue, // we should NOT close it in unload core. @@ -350,7 +364,7 @@ protected boolean isLoaded(String name) { protected CoreDescriptor getUnloadedCoreDescriptor(String cname) { synchronized (modifyLock) { - CoreDescriptor desc = lazyDescriptors.get(cname); + CoreDescriptor desc = residentDesciptors.get(cname); if (desc == null) { if (container.getTransientCacheHandler() == null) return null; desc = container.getTransientCacheHandler().getTransientDescriptor(cname); @@ -439,10 +453,8 @@ protected SolrCore getCoreToClose() { */ public CoreDescriptor getCoreDescriptor(String coreName) { synchronized (modifyLock) { - if (cores.containsKey(coreName)) - return cores.get(coreName).getCoreDescriptor(); - if (lazyDescriptors.containsKey(coreName) || container.getTransientCacheHandler() == null) - return lazyDescriptors.get(coreName); + if (residentDesciptors.containsKey(coreName)) + return residentDesciptors.get(coreName); return container.getTransientCacheHandler().getTransientDescriptor(coreName); } } diff --git a/solr/core/src/java/org/apache/solr/core/SolrInfoBean.java b/solr/core/src/java/org/apache/solr/core/SolrInfoBean.java new file mode 100644 index 000000000000..472b15e0819e --- /dev/null +++ b/solr/core/src/java/org/apache/solr/core/SolrInfoBean.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.core; + +import java.util.Map; +import java.util.Set; + +import com.codahale.metrics.MetricRegistry; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.util.stats.MetricUtils; + +/** + * Interface for getting various ui friendly strings + * for use by objects which are 'pluggable' to make server administration + * easier. + */ +public interface SolrInfoBean { + + /** + * Category of Solr component. + */ + enum Category { CONTAINER, ADMIN, CORE, QUERY, UPDATE, CACHE, HIGHLIGHTER, QUERYPARSER, SPELLCHECKER, + SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER } + + /** + * Top-level group of beans or metrics for a subsystem. + */ + enum Group { jvm, jetty, node, core, collection, shard, cluster, overseer } + + /** + * Simple common usage name, e.g. BasicQueryHandler, + * or fully qualified class name. + */ + String getName(); + /** Simple one or two line description */ + String getDescription(); + /** Category of this component */ + Category getCategory(); + + /** Optionally return a snapshot of metrics that this component reports, or null. + * Default implementation requires that both {@link #getMetricNames()} and + * {@link #getMetricRegistry()} return non-null values. + */ + default Map getMetricsSnapshot() { + if (getMetricRegistry() == null || getMetricNames() == null) { + return null; + } + return MetricUtils.convertMetrics(getMetricRegistry(), getMetricNames()); + } + + /** + * Modifiable set of metric names that this component reports (default is null, + * which means none). If not null then this set is used by {@link #registerMetricName(String)} + * to capture what metrics names are reported from this component. + */ + default Set getMetricNames() { + return null; + } + + /** + * An instance of {@link MetricRegistry} that this component uses for metrics reporting + * (default is null, which means no registry). + */ + default MetricRegistry getMetricRegistry() { + return null; + } + + /** Register a metric name that this component reports. This method is called by various + * metric registration methods in {@link org.apache.solr.metrics.SolrMetricManager} in order + * to capture what metric names are reported from this component (which in turn is called + * from {@link org.apache.solr.metrics.SolrMetricProducer#initializeMetrics(SolrMetricManager, String, String)}). + *
    <p>Default implementation registers all metrics added by a component. Implementations may + * override this to avoid reporting some or all metrics returned by {@link #getMetricsSnapshot()}</p>
    + */ + default void registerMetricName(String name) { + Set names = getMetricNames(); + if (names != null) { + names.add(name); + } + } +} diff --git a/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java b/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java deleted file mode 100644 index 63bdef0f7bcd..000000000000 --- a/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.solr.core; - -import java.net.URL; - -import org.apache.solr.common.util.NamedList; - -/** - * MBean interface for getting various ui friendly strings and URLs - * for use by objects which are 'pluggable' to make server administration - * easier. - * - * - */ -public interface SolrInfoMBean { - - /** - * Category of Solr component. - */ - enum Category { CONTAINER, ADMIN, CORE, QUERY, UPDATE, CACHE, HIGHLIGHTER, QUERYPARSER, SPELLCHECKER, - SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER } - - /** - * Top-level group of beans or metrics for a subsystem. - */ - enum Group { jvm, jetty, node, core, collection, shard, cluster, overseer } - - /** - * Simple common usage name, e.g. BasicQueryHandler, - * or fully qualified clas name. - */ - public String getName(); - /** Simple common usage version, e.g. 2.0 */ - public String getVersion(); - /** Simple one or two line description */ - public String getDescription(); - /** Purpose of this Class */ - public Category getCategory(); - /** CVS Source, SVN Source, etc */ - public String getSource(); - /** - * Documentation URL list. - * - *
    <p> - * Suggested documentation URLs: Homepage for sponsoring project, - * FAQ on class usage, Design doc for class, Wiki, bug reporting URL, etc... - * </p>
    - */ - public URL[] getDocs(); - /** - * Any statistics this instance would like to be publicly available via - * the Solr Administration interface. - * - *
    <p> - * Any Object type may be stored in the list, but only the - * toString() representation will be used. - * </p>
    - */ - public NamedList getStatistics(); - -} diff --git a/solr/core/src/java/org/apache/solr/core/SolrInfoMBeanWrapper.java b/solr/core/src/java/org/apache/solr/core/SolrInfoMBeanWrapper.java deleted file mode 100644 index 534b88449318..000000000000 --- a/solr/core/src/java/org/apache/solr/core/SolrInfoMBeanWrapper.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.solr.core; - -import java.net.URL; - -import org.apache.solr.common.util.NamedList; - -/** - * Wraps a {@link SolrInfoMBean}. - */ -public class SolrInfoMBeanWrapper implements SolrInfoMBean { - private final SolrInfoMBean mbean; - - public SolrInfoMBeanWrapper(SolrInfoMBean mbean) { - this.mbean = mbean; - } - - /** {@inheritDoc} */ - @Override - public String getName() { return mbean.getName(); } - - /** {@inheritDoc} */ - @Override - public String getVersion() { return mbean.getVersion(); } - - /** {@inheritDoc} */ - @Override - public String getDescription() { return mbean.getDescription(); } - - /** {@inheritDoc} */ - @Override - public Category getCategory() { return mbean.getCategory(); } - - /** {@inheritDoc} */ - @Override - public String getSource() { return mbean.getSource(); } - - /** {@inheritDoc} */ - @Override - public URL[] getDocs() { return mbean.getDocs(); } - - /** {@inheritDoc} */ - @Override - public NamedList getStatistics() { return mbean.getStatistics(); } - -} diff --git a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java index d310ff23c715..2e679cf6d4d4 100644 --- a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java +++ b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java @@ -100,7 +100,7 @@ public class SolrResourceLoader implements ResourceLoader,Closeable private String dataDir; private final List waitingForCore = Collections.synchronizedList(new ArrayList()); - private final List infoMBeans = Collections.synchronizedList(new ArrayList()); + private final List infoMBeans = Collections.synchronizedList(new ArrayList()); private final List waitingForResources = Collections.synchronizedList(new ArrayList()); private static final Charset UTF_8 = StandardCharsets.UTF_8; @@ -664,9 +664,9 @@ public T newInstance(String cName, Class expectedType, String [] subPacka assertAwareCompatibility( ResourceLoaderAware.class, obj ); waitingForResources.add( (ResourceLoaderAware)obj ); } - if (obj instanceof SolrInfoMBean){ + if (obj instanceof SolrInfoBean){ //TODO: Assert here? 
- infoMBeans.add((SolrInfoMBean) obj); + infoMBeans.add((SolrInfoBean) obj); } } @@ -722,21 +722,21 @@ public void inform( ResourceLoader loader ) throws IOException } /** - * Register any {@link org.apache.solr.core.SolrInfoMBean}s + * Register any {@link SolrInfoBean}s * @param infoRegistry The Info Registry */ - public void inform(Map infoRegistry) { + public void inform(Map infoRegistry) { // this can currently happen concurrently with requests starting and lazy components // loading. Make sure infoMBeans doesn't change. - SolrInfoMBean[] arr; + SolrInfoBean[] arr; synchronized (infoMBeans) { - arr = infoMBeans.toArray(new SolrInfoMBean[infoMBeans.size()]); + arr = infoMBeans.toArray(new SolrInfoBean[infoMBeans.size()]); waitingForResources.clear(); } - for (SolrInfoMBean bean : arr) { + for (SolrInfoBean bean : arr) { // Too slow? I suspect not, but we may need // to start tracking this in a Set. if (!infoRegistry.containsValue(bean)) { @@ -879,7 +879,7 @@ static void assertAwareCompatibility( Class aware, Object obj ) public void close() throws IOException { IOUtils.close(classLoader); } - public List getInfoMBeans(){ + public List getInfoMBeans(){ return Collections.unmodifiableList(infoMBeans); } diff --git a/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java b/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java index b37bd521681d..65f29a47ba95 100644 --- a/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java +++ b/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java @@ -16,6 +16,7 @@ */ package org.apache.solr.core; +import javax.management.MBeanServer; import javax.xml.xpath.XPath; import javax.xml.xpath.XPathConstants; import javax.xml.xpath.XPathExpressionException; @@ -25,7 +26,10 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; @@ -35,8 +39,10 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.util.NamedList; import org.apache.solr.logging.LogWatcherConfig; +import org.apache.solr.metrics.reporters.SolrJmxReporter; import org.apache.solr.update.UpdateShardHandlerConfig; import org.apache.solr.util.DOMUtil; +import org.apache.solr.util.JmxUtil; import org.apache.solr.util.PropertiesUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,6 +104,7 @@ public static NodeConfig fromConfig(Config config) { configBuilder.setCloudConfig(cloudConfig); configBuilder.setBackupRepositoryPlugins(getBackupRepositoryPluginInfos(config)); configBuilder.setMetricReporterPlugins(getMetricReporterPluginInfos(config)); + configBuilder.setHiddenSysProps(getHiddenSysProps(config)); return fillSolrSection(configBuilder, entries); } @@ -284,6 +291,7 @@ private static UpdateShardHandlerConfig loadUpdateConfig(NamedList nl, b int distributedSocketTimeout = UpdateShardHandlerConfig.DEFAULT_DISTRIBUPDATESOTIMEOUT; int distributedConnectionTimeout = UpdateShardHandlerConfig.DEFAULT_DISTRIBUPDATECONNTIMEOUT; String metricNameStrategy = UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY; + int maxRecoveryThreads = UpdateShardHandlerConfig.DEFAULT_MAXRECOVERYTHREADS; Object muc = nl.remove("maxUpdateConnections"); if (muc != null) { @@ -315,10 +323,17 @@ private static UpdateShardHandlerConfig loadUpdateConfig(NamedList nl, b defined = true; } + Object mrt = nl.remove("maxRecoveryThreads"); + if (mrt != null) { + 
maxRecoveryThreads = parseInt("maxRecoveryThreads", mrt.toString()); + defined = true; + } + if (!defined && !alwaysDefine) return null; - return new UpdateShardHandlerConfig(maxUpdateConnections, maxUpdateConnectionsPerHost, distributedSocketTimeout, distributedConnectionTimeout, metricNameStrategy); + return new UpdateShardHandlerConfig(maxUpdateConnections, maxUpdateConnectionsPerHost, distributedSocketTimeout, + distributedConnectionTimeout, metricNameStrategy, maxRecoveryThreads); } @@ -448,15 +463,51 @@ private static PluginInfo[] getBackupRepositoryPluginInfos(Config config) { private static PluginInfo[] getMetricReporterPluginInfos(Config config) { NodeList nodes = (NodeList) config.evaluate("solr/metrics/reporter", XPathConstants.NODESET); - if (nodes == null || nodes.getLength() == 0) - return new PluginInfo[0]; - PluginInfo[] configs = new PluginInfo[nodes.getLength()]; + List configs = new ArrayList<>(); + boolean hasJmxReporter = false; + if (nodes != null && nodes.getLength() > 0) { + for (int i = 0; i < nodes.getLength(); i++) { + // we don't require class in order to support predefined replica and node reporter classes + PluginInfo info = new PluginInfo(nodes.item(i), "SolrMetricReporter", true, false); + String clazz = info.className; + if (clazz != null && clazz.equals(SolrJmxReporter.class.getName())) { + hasJmxReporter = true; + } + configs.add(info); + } + } + // if there's an MBean server running but there was no JMX reporter then add a default one + MBeanServer mBeanServer = JmxUtil.findFirstMBeanServer(); + if (mBeanServer != null && !hasJmxReporter) { + log.info("MBean server found: " + mBeanServer + ", but no JMX reporters were configured - adding default JMX reporter."); + Map attributes = new HashMap<>(); + attributes.put("name", "default"); + attributes.put("class", SolrJmxReporter.class.getName()); + PluginInfo defaultPlugin = new PluginInfo("reporter", attributes); + configs.add(defaultPlugin); + } + return configs.toArray(new PluginInfo[configs.size()]); + } + + private static Set getHiddenSysProps(Config config) { + NodeList nodes = (NodeList) config.evaluate("solr/metrics/hiddenSysProps/str", XPathConstants.NODESET); + if (nodes == null || nodes.getLength() == 0) { + return NodeConfig.NodeConfigBuilder.DEFAULT_HIDDEN_SYS_PROPS; + } + Set props = new HashSet<>(); for (int i = 0; i < nodes.getLength(); i++) { - // we don't require class in order to support predefined replica and node reporter classes - configs[i] = new PluginInfo(nodes.item(i), "SolrMetricReporter", true, false); + String prop = DOMUtil.getText(nodes.item(i)); + if (prop != null && !prop.trim().isEmpty()) { + props.add(prop.trim()); + } + } + if (props.isEmpty()) { + return NodeConfig.NodeConfigBuilder.DEFAULT_HIDDEN_SYS_PROPS; + } else { + return props; } - return configs; } + private static PluginInfo getTransientCoreCacheFactoryPluginInfo(Config config) { Node node = config.getNode("solr/transientCoreCacheFactory", false); return (node == null) ? 
null : new PluginInfo(node, "transientCoreCacheFactory", false, true); diff --git a/solr/core/src/java/org/apache/solr/core/ZkContainer.java b/solr/core/src/java/org/apache/solr/core/ZkContainer.java index 6665c4edf489..16ba4d861dd7 100644 --- a/solr/core/src/java/org/apache/solr/core/ZkContainer.java +++ b/solr/core/src/java/org/apache/solr/core/ZkContainer.java @@ -115,7 +115,7 @@ public void initZooKeeper(final CoreContainer cc, String solrHome, CloudConfig c @Override public List getCurrentDescriptors() { List descriptors = new ArrayList<>( - cc.getCoreNames().size()); + cc.getLoadedCoreNames().size()); Collection cores = cc.getCores(); for (SolrCore core : cores) { descriptors.add(core.getCoreDescriptor()); diff --git a/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java b/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java index 485f9c399069..392930fd6d7c 100644 --- a/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java +++ b/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java @@ -14,9 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/** - * - */ package org.apache.solr.handler; import java.io.IOException; diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java index 713d6fce88c5..fd8d4bb56660 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java @@ -62,7 +62,7 @@ class CdcrBufferStateManager extends CdcrStateManager { // Startup and register the watcher at startup try { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); watcher = this.initWatcher(zkClient); this.setState(CdcrParams.BufferState.get(zkClient.getData(this.getZnodePath(), watcher, null, true))); } catch (KeeperException | InterruptedException e) { @@ -103,7 +103,7 @@ CdcrParams.BufferState getState() { * action. 
*/ void synchronize() { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); try { zkClient.setData(this.getZnodePath(), this.getState().getBytes(), true); // check if nobody changed it in the meantime, and set a new watcher @@ -114,7 +114,7 @@ void synchronize() { } private void createStateNode() { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); try { if (!zkClient.exists(this.getZnodePath(), true)) { if (!zkClient.exists(this.getZnodeBase(), true)) { @@ -158,7 +158,7 @@ public void process(WatchedEvent event) { if (Event.EventType.None.equals(event.getType())) { return; } - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); try { CdcrParams.BufferState state = CdcrParams.BufferState.get(zkClient.getData(CdcrBufferStateManager.this.getZnodePath(), watcher, null, true)); log.info("Received new CDCR buffer state from watcher: {} @ {}:{}", state, collectionName, shard); diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java index 7d8ddc406564..1b4d8af2ac31 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java @@ -53,8 +53,8 @@ class CdcrLeaderStateManager extends CdcrStateManager { // Fetch leader state and register the watcher at startup try { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); - ClusterState clusterState = core.getCoreDescriptor().getCoreContainer().getZkController().getClusterState(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); + ClusterState clusterState = core.getCoreContainer().getZkController().getClusterState(); watcher = this.initWatcher(zkClient); // if the node does not exist, it means that the leader was not yet registered. 
This can happen @@ -89,7 +89,7 @@ private Watcher initWatcher(SolrZkClient zkClient) { } private void checkIfIAmLeader() throws KeeperException, InterruptedException { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); ZkNodeProps props = ZkNodeProps.load(zkClient.getData(CdcrLeaderStateManager.this.getZnodePath(), null, null, true)); if (props != null) { CdcrLeaderStateManager.this.setAmILeader(props.get("core").equals(core.getName())); @@ -144,8 +144,8 @@ public void process(WatchedEvent event) { try { log.info("Received new leader state @ {}:{}", collectionName, shard); - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); - ClusterState clusterState = core.getCoreDescriptor().getCoreContainer().getZkController().getClusterState(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); + ClusterState clusterState = core.getCoreContainer().getZkController().getClusterState(); if (CdcrLeaderStateManager.this.isLeaderRegistered(zkClient, clusterState)) { CdcrLeaderStateManager.this.checkIfIAmLeader(); } diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java index b1c8ddaba274..05be0772d5bb 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java @@ -62,7 +62,7 @@ class CdcrProcessStateManager extends CdcrStateManager { // Register the watcher at startup try { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); watcher = this.initWatcher(zkClient); this.setState(CdcrParams.ProcessState.get(zkClient.getData(this.getZnodePath(), watcher, null, true))); } catch (KeeperException | InterruptedException e) { @@ -103,7 +103,7 @@ CdcrParams.ProcessState getState() { * action. 
*/ void synchronize() { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); try { zkClient.setData(this.getZnodePath(), this.getState().getBytes(), true); // check if nobody changed it in the meantime, and set a new watcher @@ -114,7 +114,7 @@ void synchronize() { } private void createStateNode() { - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); try { if (!zkClient.exists(this.getZnodePath(), true)) { if (!zkClient.exists(this.getZnodeBase(), true)) { // Should be a no-op if the node exists @@ -158,7 +158,7 @@ public void process(WatchedEvent event) { if (Event.EventType.None.equals(event.getType())) { return; } - SolrZkClient zkClient = core.getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient(); try { CdcrParams.ProcessState state = CdcrParams.ProcessState.get(zkClient.getData(CdcrProcessStateManager.this.getZnodePath(), watcher, null, true)); log.info("Received new CDCR process state from watcher: {} @ {}:{}", state, collectionName, shard); diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java index 528e0b7aa6bb..6f0d7040445c 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java @@ -236,7 +236,7 @@ private class BootstrapStatusRunnable implements Runnable, Closeable { this.ulog = (CdcrUpdateLog) core.getUpdateHandler().getUpdateLog(); this.state = state; this.targetCollection = state.getTargetCollection(); - String baseUrl = core.getCoreDescriptor().getCoreContainer().getZkController().getBaseUrl(); + String baseUrl = core.getCoreContainer().getZkController().getBaseUrl(); this.myCoreUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, core.getName()); } diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java index ba174f952569..44c8646f2a9c 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java @@ -244,7 +244,7 @@ public void inform(SolrCore core) { collection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName(); // Make sure that the core is ZKAware - if (!core.getCoreDescriptor().getCoreContainer().isZooKeeperAware()) { + if (!core.getCoreContainer().isZooKeeperAware()) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Solr instance is not running in SolrCloud mode."); } @@ -390,7 +390,7 @@ private NamedList getStatus() { */ private void handleCollectionCheckpointAction(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException, SolrServerException { - ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = core.getCoreContainer().getZkController(); try { zkController.getZkStateReader().forceUpdateCollection(collection); } catch (Exception e) { @@ -638,7 +638,7 @@ private void handleBootstrapAction(SolrQueryRequest req, SolrQueryResponse rsp) running.set(true); String masterUrl = req.getParams().get(ReplicationHandler.MASTER_URL); 
bootstrapCallable = new BootstrapCallable(masterUrl, core); - bootstrapFuture = core.getCoreDescriptor().getCoreContainer().getUpdateShardHandler().getRecoveryExecutor().submit(bootstrapCallable); + bootstrapFuture = core.getCoreContainer().getUpdateShardHandler().getRecoveryExecutor().submit(bootstrapCallable); try { bootstrapFuture.get(); } catch (InterruptedException e) { @@ -659,7 +659,7 @@ private void handleBootstrapAction(SolrQueryRequest req, SolrQueryResponse rsp) }; try { - core.getCoreDescriptor().getCoreContainer().getUpdateShardHandler().getUpdateExecutor().submit(runnable); + core.getCoreContainer().getUpdateShardHandler().getUpdateExecutor().submit(runnable); rsp.add(RESPONSE_STATUS, "submitted"); } catch (RejectedExecutionException ree) { // no problem, we're probably shutting down diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java b/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java index 48bfec0c3799..7ce060c4488d 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java @@ -112,7 +112,7 @@ void shutdown() { private class UpdateLogSynchronisation implements Runnable { private String getLeaderUrl() { - ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = core.getCoreContainer().getZkController(); ClusterState cstate = zkController.getClusterState(); DocCollection docCollection = cstate.getCollection(collection); ZkNodeProps leaderProps = docCollection.getLeader(shardId); diff --git a/solr/core/src/java/org/apache/solr/handler/GraphHandler.java b/solr/core/src/java/org/apache/solr/handler/GraphHandler.java index 6d41d8308406..c843e0a21253 100644 --- a/solr/core/src/java/org/apache/solr/handler/GraphHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/GraphHandler.java @@ -84,12 +84,12 @@ public void inform(SolrCore core) { String defaultCollection; String defaultZkhost; - CoreContainer coreContainer = core.getCoreDescriptor().getCoreContainer(); + CoreContainer coreContainer = core.getCoreContainer(); this.coreName = core.getName(); if(coreContainer.isZooKeeperAware()) { defaultCollection = core.getCoreDescriptor().getCollectionName(); - defaultZkhost = core.getCoreDescriptor().getCoreContainer().getZkController().getZkServerAddress(); + defaultZkhost = core.getCoreContainer().getZkController().getZkServerAddress(); streamFactory.withCollectionZkHost(defaultCollection, defaultZkhost); streamFactory.withDefaultZkHost(defaultZkhost); } diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java index d79effd83d1d..96e505a86d97 100644 --- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java +++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java @@ -215,7 +215,7 @@ private static HttpClient createHttpClient(SolrCore core, String httpBasicAuthUs httpClientParams.set(HttpClientUtil.PROP_BASIC_AUTH_PASS, httpBasicAuthPassword); httpClientParams.set(HttpClientUtil.PROP_ALLOW_COMPRESSION, useCompression); - return HttpClientUtil.createClient(httpClientParams, core.getCoreDescriptor().getCoreContainer().getUpdateShardHandler().getConnectionManager(), true); + return HttpClientUtil.createClient(httpClientParams, core.getCoreContainer().getUpdateShardHandler().getConnectionManager(), true); } public IndexFetcher(final NamedList initArgs, final 
ReplicationHandler handler, final SolrCore sc) { @@ -482,7 +482,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel // because of soft commits (which open a searcher on IW's data) // so we need to close the existing searcher on the last commit // and wait until we are able to clean up all unused lucene files - if (solrCore.getCoreDescriptor().getCoreContainer().isZooKeeperAware()) { + if (solrCore.getCoreContainer().isZooKeeperAware()) { solrCore.closeSearcher(); } @@ -639,7 +639,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel } private Replica getLeaderReplica() throws InterruptedException { - ZkController zkController = solrCore.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = solrCore.getCoreContainer().getZkController(); CloudDescriptor cd = solrCore.getCoreDescriptor().getCloudDescriptor(); Replica leaderReplica = zkController.getZkStateReader().getLeaderRetry( cd.getCollectionName(), cd.getShardId()); @@ -658,7 +658,7 @@ private void cleanup(final SolrCore core, Directory tmpIndexDir, } } - if (core.getCoreDescriptor().getCoreContainer().isZooKeeperAware()) { + if (core.getCoreContainer().isZooKeeperAware()) { // we only track replication success in SolrCloud mode core.getUpdateHandler().getSolrCoreState().setLastReplicateIndexSuccess(successfulInstall); } @@ -846,7 +846,7 @@ private void openNewSearcherAndUpdateCommitPoint() throws IOException { IndexCommit commitPoint; // must get the latest solrCore object because the one we have might be closed because of a reload // todo stop keeping solrCore around - SolrCore core = solrCore.getCoreDescriptor().getCoreContainer().getCore(solrCore.getName()); + SolrCore core = solrCore.getCoreContainer().getCore(solrCore.getName()); try { Future[] waitSearcher = new Future[1]; searcher = core.getSearcher(true, true, waitSearcher, true); @@ -874,7 +874,7 @@ private void reloadCore() { final CountDownLatch latch = new CountDownLatch(1); new Thread(() -> { try { - solrCore.getCoreDescriptor().getCoreContainer().reload(solrCore.getName()); + solrCore.getCoreContainer().reload(solrCore.getName()); } catch (Exception e) { LOG.error("Could not reload core ", e); } finally { diff --git a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java b/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java index 9c86350d82a4..50ea711e9d5c 100644 --- a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java @@ -19,8 +19,6 @@ import java.io.IOException; import java.io.Reader; import java.lang.invoke.MethodHandles; -import java.net.MalformedURLException; -import java.net.URL; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; @@ -481,12 +479,4 @@ public MoreLikeThis getMoreLikeThis() public String getDescription() { return "Solr MoreLikeThis"; } - - @Override - public URL[] getDocs() { - try { - return new URL[] { new URL("http://wiki.apache.org/solr/MoreLikeThis") }; - } - catch( MalformedURLException ex ) { return null; } - } } diff --git a/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java b/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java index 904931863583..bce374f4aea4 100644 --- a/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java @@ -20,7 +20,6 @@ import org.apache.solr.api.ApiBag; 
import org.apache.solr.handler.component.*; -import java.net.URL; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -42,11 +41,6 @@ public String getDescription() { return "The realtime get handler"; } - @Override - public URL[] getDocs() { - return null; - } - @Override public Collection getApis() { return ApiBag.wrapRequestHandlers(this, "core.RealtimeGet"); diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java index 98bf11ab376b..94ff1893928b 100644 --- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java @@ -90,6 +90,8 @@ import org.apache.solr.core.backup.repository.BackupRepository; import org.apache.solr.core.backup.repository.LocalFileSystemRepository; import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.handler.IndexFetcher.IndexFetchResult; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; @@ -162,6 +164,10 @@ public static CommitVersionInfo build(IndexCommit commit) { } return new CommitVersionInfo(generation, version); } + + public String toString() { + return "generation=" + generation + ",version=" + version; + } } private IndexFetcher pollingIndexFetcher; @@ -437,7 +443,7 @@ private void restore(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest String location = params.get(CoreAdminParams.BACKUP_LOCATION); String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY); - CoreContainer cc = core.getCoreDescriptor().getCoreContainer(); + CoreContainer cc = core.getCoreContainer(); BackupRepository repo = null; if (repoName != null) { repo = cc.newBackupRepository(Optional.of(repoName)); @@ -555,7 +561,7 @@ private void doSnapShoot(SolrParams params, SolrQueryResponse rsp, String location = params.get(CoreAdminParams.BACKUP_LOCATION); String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY); - CoreContainer cc = core.getCoreDescriptor().getCoreContainer(); + CoreContainer cc = core.getCoreContainer(); BackupRepository repo = null; if (repoName != null) { repo = cc.newBackupRepository(Optional.of(repoName)); @@ -693,7 +699,7 @@ private void getFileList(SolrParams solrParams, SolrQueryResponse rsp) { } } - if (confFileNameAlias.size() < 1 || core.getCoreDescriptor().getCoreContainer().isZooKeeperAware()) + if (confFileNameAlias.size() < 1 || core.getCoreContainer().isZooKeeperAware()) return; LOG.debug("Adding config files to list: " + includeConfFiles); //if configuration files need to be included get their details @@ -851,52 +857,56 @@ private CommitVersionInfo getIndexVersion() { } @Override - @SuppressWarnings("unchecked") - public NamedList getStatistics() { - NamedList list = super.getStatistics(); - if (core != null) { - list.add("indexSize", NumberUtils.readableSize(core.getIndexSize())); - CommitVersionInfo vInfo = (core != null && !core.isClosed()) ? getIndexVersion(): null; - list.add("indexVersion", null == vInfo ? 0 : vInfo.version); - list.add(GENERATION, null == vInfo ? 
0 : vInfo.generation); - - list.add("indexPath", core.getIndexDir()); - list.add("isMaster", String.valueOf(isMaster)); - list.add("isSlave", String.valueOf(isSlave)); - + public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { + super.initializeMetrics(manager, registry, scope); + + manager.registerGauge(this, registry, () -> core != null ? NumberUtils.readableSize(core.getIndexSize()) : "", true, + "indexSize", getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> (core != null && !core.isClosed() ? getIndexVersion().toString() : ""), true, + "indexVersion", getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> (core != null && !core.isClosed() ? getIndexVersion().generation : 0), true, + GENERATION, getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> core != null ? core.getIndexDir() : "", true, + "indexPath", getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> isMaster, true, + "isMaster", getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> isSlave, true, + "isSlave", getCategory().toString(), scope); + final MetricsMap fetcherMap = new MetricsMap((detailed, map) -> { IndexFetcher fetcher = currentIndexFetcher; if (fetcher != null) { - list.add(MASTER_URL, fetcher.getMasterUrl()); + map.put(MASTER_URL, fetcher.getMasterUrl()); if (getPollInterval() != null) { - list.add(POLL_INTERVAL, getPollInterval()); + map.put(POLL_INTERVAL, getPollInterval()); } - list.add("isPollingDisabled", String.valueOf(isPollingDisabled())); - list.add("isReplicating", String.valueOf(isReplicating())); + map.put("isPollingDisabled", isPollingDisabled()); + map.put("isReplicating", isReplicating()); long elapsed = fetcher.getReplicationTimeElapsed(); long val = fetcher.getTotalBytesDownloaded(); if (elapsed > 0) { - list.add("timeElapsed", elapsed); - list.add("bytesDownloaded", val); - list.add("downloadSpeed", val / elapsed); + map.put("timeElapsed", elapsed); + map.put("bytesDownloaded", val); + map.put("downloadSpeed", val / elapsed); } Properties props = loadReplicationProperties(); - addVal(list, IndexFetcher.PREVIOUS_CYCLE_TIME_TAKEN, props, Long.class); - addVal(list, IndexFetcher.INDEX_REPLICATED_AT, props, Date.class); - addVal(list, IndexFetcher.CONF_FILES_REPLICATED_AT, props, Date.class); - addVal(list, IndexFetcher.REPLICATION_FAILED_AT, props, Date.class); - addVal(list, IndexFetcher.TIMES_FAILED, props, Integer.class); - addVal(list, IndexFetcher.TIMES_INDEX_REPLICATED, props, Integer.class); - addVal(list, IndexFetcher.LAST_CYCLE_BYTES_DOWNLOADED, props, Long.class); - addVal(list, IndexFetcher.TIMES_CONFIG_REPLICATED, props, Integer.class); - addVal(list, IndexFetcher.CONF_FILES_REPLICATED, props, String.class); + addVal(map, IndexFetcher.PREVIOUS_CYCLE_TIME_TAKEN, props, Long.class); + addVal(map, IndexFetcher.INDEX_REPLICATED_AT, props, Date.class); + addVal(map, IndexFetcher.CONF_FILES_REPLICATED_AT, props, Date.class); + addVal(map, IndexFetcher.REPLICATION_FAILED_AT, props, Date.class); + addVal(map, IndexFetcher.TIMES_FAILED, props, Integer.class); + addVal(map, IndexFetcher.TIMES_INDEX_REPLICATED, props, Integer.class); + addVal(map, IndexFetcher.LAST_CYCLE_BYTES_DOWNLOADED, props, Long.class); + addVal(map, IndexFetcher.TIMES_CONFIG_REPLICATED, props, Integer.class); + addVal(map, IndexFetcher.CONF_FILES_REPLICATED, props, String.class); } - if (isMaster) { - if (includeConfFiles != null) 
list.add("confFilesToReplicate", includeConfFiles); - list.add(REPLICATE_AFTER, getReplicateAfterStrings()); - list.add("replicationEnabled", String.valueOf(replicationEnabled.get())); - } - } - return list; + }); + manager.registerGauge(this, registry, fetcherMap, true, "fetcher", getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> isMaster && includeConfFiles != null ? includeConfFiles : "", true, + "confFilesToReplicate", getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> isMaster ? getReplicateAfterStrings() : Collections.emptyList(), true, + REPLICATE_AFTER, getCategory().toString(), scope); + manager.registerGauge(this, registry, () -> isMaster && replicationEnabled.get(), true, + "replicationEnabled", getCategory().toString(), scope); } /** @@ -1064,24 +1074,39 @@ private NamedList getReplicationDetails(boolean showSlaveDetails) { } private void addVal(NamedList nl, String key, Properties props, Class clzz) { + Object val = formatVal(key, props, clzz); + if (val != null) { + nl.add(key, val); + } + } + + private void addVal(Map map, String key, Properties props, Class clzz) { + Object val = formatVal(key, props, clzz); + if (val != null) { + map.put(key, val); + } + } + + private Object formatVal(String key, Properties props, Class clzz) { String s = props.getProperty(key); - if (s == null || s.trim().length() == 0) return; + if (s == null || s.trim().length() == 0) return null; if (clzz == Date.class) { try { Long l = Long.parseLong(s); - nl.add(key, new Date(l).toString()); - } catch (NumberFormatException e) {/*no op*/ } + return new Date(l).toString(); + } catch (NumberFormatException e) { + return null; + } } else if (clzz == List.class) { String ss[] = s.split(","); List l = new ArrayList<>(); for (String s1 : ss) { l.add(new Date(Long.parseLong(s1)).toString()); } - nl.add(key, l); + return l; } else { - nl.add(key, s); + return s; } - } private List getReplicateAfterStrings() { @@ -1189,7 +1214,7 @@ public void inform(SolrCore core) { boolean enableMaster = isEnabled( master ); if (enableMaster || enableSlave) { - if (core.getCoreDescriptor().getCoreContainer().getZkController() != null) { + if (core.getCoreContainer().getZkController() != null) { LOG.warn("SolrCloud is enabled for core " + core.getName() + " but so is old-style replication. Make sure you" + " intend this behavior, it usually indicates a mis-configuration. 
Master setting is " + Boolean.toString(enableMaster) + " and slave setting is " + Boolean.toString(enableSlave)); diff --git a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java index 1958e11f5874..421976801b27 100644 --- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java +++ b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java @@ -17,9 +17,11 @@ package org.apache.solr.handler; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import com.codahale.metrics.MetricRegistry; import com.google.common.collect.ImmutableList; import com.codahale.metrics.Counter; import com.codahale.metrics.Meter; @@ -27,11 +29,10 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.SuppressForbidden; import org.apache.solr.core.PluginBag; import org.apache.solr.core.PluginInfo; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.request.SolrQueryRequest; @@ -42,7 +43,6 @@ import org.apache.solr.api.Api; import org.apache.solr.api.ApiBag; import org.apache.solr.api.ApiSupport; -import org.apache.solr.util.stats.MetricUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,7 +51,7 @@ /** * */ -public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfoMBean, SolrMetricProducer, NestedRequestHandler,ApiSupport { +public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfoBean, SolrMetricProducer, NestedRequestHandler,ApiSupport { protected NamedList initArgs = null; protected SolrParams defaults; @@ -74,6 +74,9 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo private PluginInfo pluginInfo; + private Set metricNames = new HashSet<>(); + private MetricRegistry registry; + @SuppressForbidden(reason = "Need currentTimeMillis, used only for stats output") public RequestHandlerBase() { handlerStart = System.currentTimeMillis(); @@ -138,13 +141,15 @@ public void init(NamedList args) { @Override public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { - numErrors = manager.meter(registryName, "errors", getCategory().toString(), scope); - numServerErrors = manager.meter(registryName, "serverErrors", getCategory().toString(), scope); - numClientErrors = manager.meter(registryName, "clientErrors", getCategory().toString(), scope); - numTimeouts = manager.meter(registryName, "timeouts", getCategory().toString(), scope); - requests = manager.counter(registryName, "requests", getCategory().toString(), scope); - requestTimes = manager.timer(registryName, "requestTimes", getCategory().toString(), scope); - totalTime = manager.counter(registryName, "totalTime", getCategory().toString(), scope); + registry = manager.registry(registryName); + numErrors = manager.meter(this, registryName, "errors", getCategory().toString(), scope); + numServerErrors = manager.meter(this, registryName, "serverErrors", getCategory().toString(), scope); + numClientErrors = manager.meter(this, registryName, "clientErrors", getCategory().toString(), scope); + numTimeouts = manager.meter(this, 
registryName, "timeouts", getCategory().toString(), scope); + requests = manager.counter(this, registryName, "requests", getCategory().toString(), scope); + requestTimes = manager.timer(this, registryName, "requestTimes", getCategory().toString(), scope); + totalTime = manager.counter(this, registryName, "totalTime", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> handlerStart, true, "handlerStart", getCategory().toString(), scope); } public static SolrParams getSolrParamsFromNamedList(NamedList args, String key) { @@ -225,24 +230,21 @@ public String getName() { @Override public abstract String getDescription(); - @Override - public String getSource() { return null; } - - @Override - public String getVersion() { - return getClass().getPackage().getSpecificationVersion(); - } - + @Override public Category getCategory() { return Category.QUERY; } @Override - public URL[] getDocs() { - return null; // this can be overridden, but not required + public Set getMetricNames() { + return metricNames; } + @Override + public MetricRegistry getMetricRegistry() { + return registry; + } @Override public SolrRequestHandler getSubHandler(String subPath) { @@ -285,22 +287,6 @@ public PluginInfo getPluginInfo(){ return pluginInfo; } - - @Override - public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap<>(); - lst.add("handlerStart",handlerStart); - lst.add("requests", requests.getCount()); - lst.add("errors", numErrors.getCount()); - lst.add("serverErrors", numServerErrors.getCount()); - lst.add("clientErrors", numClientErrors.getCount()); - lst.add("timeouts", numTimeouts.getCount()); - // convert totalTime to ms - lst.add("totalTime", MetricUtils.nsToMs(totalTime.getCount())); - MetricUtils.addMetrics(lst, requestTimes); - return lst; - } - @Override public Collection getApis() { return ImmutableList.of(new ApiBag.ReqHandlerToApi(this, ApiBag.constructSpec(pluginInfo))); diff --git a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java index f307baa72ead..c80d0d7743b3 100644 --- a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java @@ -60,10 +60,10 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware, Per private boolean isCloud = false; public void inform(SolrCore core) { - CoreContainer coreContainer = core.getCoreDescriptor().getCoreContainer(); + CoreContainer coreContainer = core.getCoreContainer(); if(coreContainer.isZooKeeperAware()) { - defaultZkhost = core.getCoreDescriptor().getCoreContainer().getZkController().getZkServerAddress(); + defaultZkhost = core.getCoreContainer().getZkController().getZkServerAddress(); defaultWorkerCollection = core.getCoreDescriptor().getCollectionName(); isCloud = true; } diff --git a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java index 2660cba83548..08ce83886c94 100644 --- a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java @@ -436,7 +436,7 @@ private void handleParams(ArrayList ops, RequestParams params) log.debug("persisted to version : {} ", latestVersion); waitForAllReplicasState(req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName(), - req.getCore().getCoreDescriptor().getCoreContainer().getZkController(), RequestParams.NAME, latestVersion, 30); + 
req.getCore().getCoreContainer().getZkController(), RequestParams.NAME, latestVersion, 30); } } else { @@ -495,12 +495,12 @@ private void handleCommands(List ops, ConfigOverlay overlay) t ConfigOverlay.RESOURCE_NAME, overlay.toByteArray(), true); log.info("Executed config commands successfully and persisted to ZK {}", ops); waitForAllReplicasState(req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName(), - req.getCore().getCoreDescriptor().getCoreContainer().getZkController(), + req.getCore().getCoreContainer().getZkController(), ConfigOverlay.NAME, latestVersion, 30); } else { SolrResourceLoader.persistConfLocally(loader, ConfigOverlay.RESOURCE_NAME, overlay.toByteArray()); - req.getCore().getCoreDescriptor().getCoreContainer().reload(req.getCore().getName()); + req.getCore().getCoreContainer().reload(req.getCore().getName()); log.info("Executed config commands successfully and persited to File System {}", ops); } @@ -702,12 +702,6 @@ public String getDescription() { return "Edit solrconfig.xml"; } - - @Override - public String getVersion() { - return getClass().getPackage().getSpecificationVersion(); - } - @Override public Category getCategory() { return Category.ADMIN; diff --git a/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java index d5eae080cfac..f167b1d5dc36 100644 --- a/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java @@ -18,9 +18,6 @@ import org.apache.solr.handler.component.*; -import java.net.MalformedURLException; -import java.net.URL; - /** * * @@ -47,14 +44,6 @@ public class StandardRequestHandler extends SearchHandler public String getDescription() { return "The standard Solr request handler"; } - - @Override - public URL[] getDocs() { - try { - return new URL[] { new URL("http://wiki.apache.org/solr/StandardRequestHandler") }; - } - catch( MalformedURLException ex ) { return null; } - } } diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java index 6f373f65b0a2..c750ce958798 100644 --- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java @@ -33,41 +33,7 @@ import org.apache.solr.client.solrj.io.SolrClientCache; import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.comp.StreamComparator; -import org.apache.solr.client.solrj.io.eval.AbsoluteValueEvaluator; -import org.apache.solr.client.solrj.io.eval.AddEvaluator; -import org.apache.solr.client.solrj.io.eval.AndEvaluator; -import org.apache.solr.client.solrj.io.eval.ArcCosineEvaluator; -import org.apache.solr.client.solrj.io.eval.ArcSineEvaluator; -import org.apache.solr.client.solrj.io.eval.ArcTangentEvaluator; -import org.apache.solr.client.solrj.io.eval.CeilingEvaluator; -import org.apache.solr.client.solrj.io.eval.CoalesceEvaluator; -import org.apache.solr.client.solrj.io.eval.CosineEvaluator; -import org.apache.solr.client.solrj.io.eval.CubedRootEvaluator; -import org.apache.solr.client.solrj.io.eval.DivideEvaluator; -import org.apache.solr.client.solrj.io.eval.EqualsEvaluator; -import org.apache.solr.client.solrj.io.eval.ExclusiveOrEvaluator; -import org.apache.solr.client.solrj.io.eval.FloorEvaluator; -import org.apache.solr.client.solrj.io.eval.GreaterThanEqualToEvaluator; -import 
org.apache.solr.client.solrj.io.eval.GreaterThanEvaluator; -import org.apache.solr.client.solrj.io.eval.HyperbolicCosineEvaluator; -import org.apache.solr.client.solrj.io.eval.HyperbolicSineEvaluator; -import org.apache.solr.client.solrj.io.eval.HyperbolicTangentEvaluator; -import org.apache.solr.client.solrj.io.eval.IfThenElseEvaluator; -import org.apache.solr.client.solrj.io.eval.LessThanEqualToEvaluator; -import org.apache.solr.client.solrj.io.eval.LessThanEvaluator; -import org.apache.solr.client.solrj.io.eval.ModuloEvaluator; -import org.apache.solr.client.solrj.io.eval.MultiplyEvaluator; -import org.apache.solr.client.solrj.io.eval.NaturalLogEvaluator; -import org.apache.solr.client.solrj.io.eval.NotEvaluator; -import org.apache.solr.client.solrj.io.eval.OrEvaluator; -import org.apache.solr.client.solrj.io.eval.PowerEvaluator; -import org.apache.solr.client.solrj.io.eval.RawValueEvaluator; -import org.apache.solr.client.solrj.io.eval.RoundEvaluator; -import org.apache.solr.client.solrj.io.eval.SineEvaluator; -import org.apache.solr.client.solrj.io.eval.SquareRootEvaluator; -import org.apache.solr.client.solrj.io.eval.SubtractEvaluator; -import org.apache.solr.client.solrj.io.eval.TangentEvaluator; -import org.apache.solr.client.solrj.io.eval.UuidEvaluator; +import org.apache.solr.client.solrj.io.eval.*; import org.apache.solr.client.solrj.io.graph.GatherNodesStream; import org.apache.solr.client.solrj.io.graph.ShortestPathStream; import org.apache.solr.client.solrj.io.ops.ConcatOperation; @@ -133,12 +99,12 @@ public void inform(SolrCore core) { String defaultCollection; String defaultZkhost; - CoreContainer coreContainer = core.getCoreDescriptor().getCoreContainer(); + CoreContainer coreContainer = core.getCoreContainer(); this.coreName = core.getName(); if(coreContainer.isZooKeeperAware()) { defaultCollection = core.getCoreDescriptor().getCollectionName(); - defaultZkhost = core.getCoreDescriptor().getCoreContainer().getZkController().getZkServerAddress(); + defaultZkhost = core.getCoreContainer().getZkController().getZkServerAddress(); streamFactory.withCollectionZkHost(defaultCollection, defaultZkhost); streamFactory.withDefaultZkHost(defaultZkhost); modelCache = new ModelCache(250, @@ -189,31 +155,38 @@ public void inform(SolrCore core) { .withFunctionName("executor", ExecutorStream.class) .withFunctionName("null", NullStream.class) .withFunctionName("priority", PriorityStream.class) - .withFunctionName("significantTerms", SignificantTermsStream.class) + .withFunctionName("significantTerms", SignificantTermsStream.class) .withFunctionName("cartesianProduct", CartesianProductStream.class) - .withFunctionName("shuffle", ShuffleStream.class) - + .withFunctionName("shuffle", ShuffleStream.class) + .withFunctionName("calc", CalculatorStream.class) + .withFunctionName("eval",EvalStream.class) + .withFunctionName("echo", EchoStream.class) + .withFunctionName("cell", CellStream.class) + .withFunctionName("list", ListStream.class) + .withFunctionName("let", LetStream.class) + .withFunctionName("get", GetStream.class) + .withFunctionName("timeseries", TimeSeriesStream.class) // metrics - .withFunctionName("min", MinMetric.class) + .withFunctionName("min", MinMetric.class) .withFunctionName("max", MaxMetric.class) .withFunctionName("avg", MeanMetric.class) .withFunctionName("sum", SumMetric.class) .withFunctionName("count", CountMetric.class) // tuple manipulation operations - .withFunctionName("replace", ReplaceOperation.class) + .withFunctionName("replace", ReplaceOperation.class) 
.withFunctionName("concat", ConcatOperation.class) // stream reduction operations - .withFunctionName("group", GroupOperation.class) + .withFunctionName("group", GroupOperation.class) .withFunctionName("distinct", DistinctOperation.class) .withFunctionName("having", HavingStream.class) // Stream Evaluators - .withFunctionName("val", RawValueEvaluator.class) + .withFunctionName("val", RawValueEvaluator.class) // Boolean Stream Evaluators - .withFunctionName("and", AndEvaluator.class) + .withFunctionName("and", AndEvaluator.class) .withFunctionName("eor", ExclusiveOrEvaluator.class) .withFunctionName("eq", EqualsEvaluator.class) .withFunctionName("gt", GreaterThanEvaluator.class) @@ -221,10 +194,23 @@ public void inform(SolrCore core) { .withFunctionName("lt", LessThanEvaluator.class) .withFunctionName("lteq", LessThanEqualToEvaluator.class) .withFunctionName("not", NotEvaluator.class) - .withFunctionName("or", OrEvaluator.class) - + .withFunctionName("or", OrEvaluator.class) + + // Date Time Evaluators + .withFunctionName(TemporalEvaluatorYear.FUNCTION_NAME, TemporalEvaluatorYear.class) + .withFunctionName(TemporalEvaluatorMonth.FUNCTION_NAME, TemporalEvaluatorMonth.class) + .withFunctionName(TemporalEvaluatorDay.FUNCTION_NAME, TemporalEvaluatorDay.class) + .withFunctionName(TemporalEvaluatorDayOfYear.FUNCTION_NAME, TemporalEvaluatorDayOfYear.class) + .withFunctionName(TemporalEvaluatorHour.FUNCTION_NAME, TemporalEvaluatorHour.class) + .withFunctionName(TemporalEvaluatorMinute.FUNCTION_NAME, TemporalEvaluatorMinute.class) + .withFunctionName(TemporalEvaluatorSecond.FUNCTION_NAME, TemporalEvaluatorSecond.class) + .withFunctionName(TemporalEvaluatorEpoch.FUNCTION_NAME, TemporalEvaluatorEpoch.class) + .withFunctionName(TemporalEvaluatorWeek.FUNCTION_NAME, TemporalEvaluatorWeek.class) + .withFunctionName(TemporalEvaluatorQuarter.FUNCTION_NAME, TemporalEvaluatorQuarter.class) + .withFunctionName(TemporalEvaluatorDayOfQuarter.FUNCTION_NAME, TemporalEvaluatorDayOfQuarter.class) + // Number Stream Evaluators - .withFunctionName("abs", AbsoluteValueEvaluator.class) + .withFunctionName("abs", AbsoluteValueEvaluator.class) .withFunctionName("add", AddEvaluator.class) .withFunctionName("div", DivideEvaluator.class) .withFunctionName("mult", MultiplyEvaluator.class) @@ -232,7 +218,7 @@ public void inform(SolrCore core) { .withFunctionName("log", NaturalLogEvaluator.class) .withFunctionName("pow", PowerEvaluator.class) .withFunctionName("mod", ModuloEvaluator.class) - .withFunctionName("ceil", CeilingEvaluator.class) + .withFunctionName("ceil", CeilingEvaluator.class) .withFunctionName("floor", FloorEvaluator.class) .withFunctionName("sin", SineEvaluator.class) .withFunctionName("asin", ArcSineEvaluator.class) @@ -243,15 +229,17 @@ public void inform(SolrCore core) { .withFunctionName("tan", TangentEvaluator.class) .withFunctionName("atan", ArcTangentEvaluator.class) .withFunctionName("tanh", HyperbolicTangentEvaluator.class) - .withFunctionName("round", RoundEvaluator.class) + .withFunctionName("round", RoundEvaluator.class) .withFunctionName("sqrt", SquareRootEvaluator.class) .withFunctionName("cbrt", CubedRootEvaluator.class) .withFunctionName("coalesce", CoalesceEvaluator.class) .withFunctionName("uuid", UuidEvaluator.class) - + + // Conditional Stream Evaluators - .withFunctionName("if", IfThenElseEvaluator.class) - .withFunctionName("analyze", AnalyzeEvaluator.class) + .withFunctionName("if", IfThenElseEvaluator.class) + .withFunctionName("analyze", AnalyzeEvaluator.class) + 
.withFunctionName("convert", ConversionEvaluator.class) ; // This pulls all the overrides and additions from the config diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java index 581fe46ba663..3cb21ab18e10 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java @@ -65,6 +65,7 @@ enum Cmd implements ApiCommand { GET_CLUSTER_STATUS_CMD(EndPoint.CLUSTER_CMD_STATUS, GET, REQUESTSTATUS_OP), DELETE_CLUSTER_STATUS(EndPoint.CLUSTER_CMD_STATUS_DELETE, DELETE, DELETESTATUS_OP), GET_A_COLLECTION(EndPoint.COLLECTION_STATE, GET, CLUSTERSTATUS_OP), + LIST_ALIASES(EndPoint.CLUSTER_ALIASES, GET, LISTALIASES_OP), CREATE_COLLECTION(EndPoint.COLLECTIONS_COMMANDS, POST, CREATE_OP, @@ -290,6 +291,7 @@ public void invoke(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSu enum EndPoint implements V2EndPoint { CLUSTER("cluster"), + CLUSTER_ALIASES("cluster.aliases"), CLUSTER_CMD("cluster.Commands"), CLUSTER_NODES("cluster.nodes"), CLUSTER_CMD_STATUS("cluster.commandstatus"), diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java index bb061902a664..d5c49274d747 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java @@ -52,6 +52,7 @@ import org.apache.solr.cloud.rule.Rule; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; +import org.apache.solr.common.cloud.Aliases; import org.apache.solr.common.cloud.ClusterProperties; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocCollection; @@ -460,6 +461,19 @@ enum CollectionOperation implements CollectionOp { return req.getParams().required().getAll(null, NAME, "collections"); }), DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> req.getParams().required().getAll(null, NAME)), + + /** + * List the collection aliases defined in the cluster. + * Returns the map of alias name to collection(s).
+ */ + LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> { + ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader(); + Aliases aliases = zkStateReader.getAliases(); + if (aliases != null) { + rsp.getValues().add("aliases", aliases.getCollectionAliasMap()); + } + return null; + }), SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, true, (req, rsp, h) -> { String name = req.getParams().required().get(COLLECTION_PROP); // TODO : add support for multiple shards diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java index d3489df05b0e..3f857e79d67f 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java @@ -306,13 +306,15 @@ public Boolean registerV2() { @Override public Name getPermissionName(AuthorizationContext ctx) { - switch (ctx.getHttpMethod()) { - case "GET": - return Name.CONFIG_READ_PERM; - case "POST": + String a = ctx.getParams().get(ConfigSetParams.ACTION); + if (a != null) { + ConfigSetAction action = ConfigSetAction.get(a); + if (action == ConfigSetAction.CREATE || action == ConfigSetAction.DELETE || action == ConfigSetAction.UPLOAD) { return Name.CONFIG_EDIT_PERM; - default: - return null; + } else if (action == ConfigSetAction.LIST) { + return Name.CONFIG_READ_PERM; + } } + return null; } } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java index 275ec18a2d72..67463327e2f8 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java @@ -121,7 +121,7 @@ final public void init(NamedList args) { @Override public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { super.initializeMetrics(manager, registryName, scope); - parallelExecutor = MetricUtils.instrumentedExecutorService(parallelExecutor, manager.registry(registryName), + parallelExecutor = MetricUtils.instrumentedExecutorService(parallelExecutor, this, manager.registry(registryName), SolrMetricManager.mkName("parallelCoreAdminExecutor", getCategory().name(),scope, "threadPool")); } @Override diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java index b10aed11ac80..122d2cbf8b5f 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java @@ -60,7 +60,7 @@ public LoggingHandler() { @Override public void inform(SolrCore core) { if (watcher == null) { - watcher = core.getCoreDescriptor().getCoreContainer().getLogging(); + watcher = core.getCoreContainer().getLogging(); } } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java index 8e0b1fb8cc77..2db04d9d0b6a 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java @@ -22,8 +22,6 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.net.MalformedURLException; -import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -707,14 
+705,6 @@ public Category getCategory() { return Category.ADMIN; } - @Override - public URL[] getDocs() { - try { - return new URL[] { new URL("http://wiki.apache.org/solr/LukeRequestHandler") }; - } - catch( MalformedURLException ex ) { return null; } - } - /////////////////////////////////////////////////////////////////////////////////////// static class TermHistogram diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java index de39a6156064..3d8b6e080c93 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java @@ -134,7 +134,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw @Override public String getDescription() { - return "Handler for collecting and aggregating metric reports."; + return "Handler for collecting and aggregating SolrCloud metric reports."; } private static class MetricUpdateProcessor extends UpdateRequestProcessor { @@ -174,9 +174,15 @@ public void processAdd(AddUpdateCommand cmd) throws IOException { String labelId = (String)doc.getFieldValue(SolrReporter.LABEL_ID); doc.remove(SolrReporter.LABEL_ID); doc.forEach(f -> { - String key = MetricRegistry.name(labelId, metricName, f.getName()); + String key; + if (doc.size() == 1 && f.getName().equals(MetricUtils.VALUE)) { + // only one "value" field - skip the unnecessary field name + key = MetricRegistry.name(labelId, metricName); + } else { + key = MetricRegistry.name(labelId, metricName, f.getName()); + } MetricRegistry registry = metricManager.registry(groupId); - AggregateMetric metric = getOrRegister(registry, key, new AggregateMetric()); + AggregateMetric metric = getOrCreate(registry, key); Object o = f.getFirstValue(); if (o != null) { metric.set(reporterId, o); @@ -187,11 +193,12 @@ public void processAdd(AddUpdateCommand cmd) throws IOException { }); } - private AggregateMetric getOrRegister(MetricRegistry registry, String name, AggregateMetric add) { + private AggregateMetric getOrCreate(MetricRegistry registry, String name) { AggregateMetric existing = (AggregateMetric)registry.getMetrics().get(name); if (existing != null) { return existing; } + AggregateMetric add = new AggregateMetric(); try { registry.register(name, add); return add; diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java index 4dc86d978556..11f68212e222 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -52,6 +53,14 @@ public class MetricsHandler extends RequestHandlerBase implements PermissionName final SolrMetricManager metricManager; public static final String COMPACT_PARAM = "compact"; + public static final String PREFIX_PARAM = "prefix"; + public static final String REGEX_PARAM = "regex"; + public static final String PROPERTY_PARAM = "property"; + public static final String REGISTRY_PARAM = "registry"; + public static final String GROUP_PARAM = "group"; + public static final String TYPE_PARAM = "type"; + + public static final String ALL = "all"; public MetricsHandler() { this.container = null; @@ 
-74,8 +83,9 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Core container instance not initialized"); } - boolean compact = req.getParams().getBool(COMPACT_PARAM, false); + boolean compact = req.getParams().getBool(COMPACT_PARAM, true); MetricFilter mustMatchFilter = parseMustMatchFilter(req); + MetricUtils.PropertyFilter propertyFilter = parsePropertyFilter(req); List metricTypes = parseMetricTypes(req); List metricFilters = metricTypes.stream().map(MetricType::asMetricFilter).collect(Collectors.toList()); Set requestedRegistries = parseRegistries(req); @@ -83,30 +93,67 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw NamedList response = new SimpleOrderedMap(); for (String registryName : requestedRegistries) { MetricRegistry registry = metricManager.registry(registryName); - response.add(registryName, MetricUtils.toNamedList(registry, metricFilters, mustMatchFilter, false, - false, compact, null)); + SimpleOrderedMap result = new SimpleOrderedMap(); + MetricUtils.toMaps(registry, metricFilters, mustMatchFilter, propertyFilter, false, + false, compact, false, (k, v) -> result.add(k, v)); + if (result.size() > 0) { + response.add(registryName, result); + } } rsp.getValues().add("metrics", response); } private MetricFilter parseMustMatchFilter(SolrQueryRequest req) { - String[] prefixes = req.getParams().getParams("prefix"); - MetricFilter mustMatchFilter; + String[] prefixes = req.getParams().getParams(PREFIX_PARAM); + MetricFilter prefixFilter = null; if (prefixes != null && prefixes.length > 0) { Set prefixSet = new HashSet<>(); for (String prefix : prefixes) { prefixSet.addAll(StrUtils.splitSmart(prefix, ',')); } - mustMatchFilter = new SolrMetricManager.PrefixFilter((String[])prefixSet.toArray(new String[prefixSet.size()])); - } else { + prefixFilter = new SolrMetricManager.PrefixFilter(prefixSet); + } + String[] regexes = req.getParams().getParams(REGEX_PARAM); + MetricFilter regexFilter = null; + if (regexes != null && regexes.length > 0) { + regexFilter = new SolrMetricManager.RegexFilter(regexes); + } + MetricFilter mustMatchFilter; + if (prefixFilter == null && regexFilter == null) { mustMatchFilter = MetricFilter.ALL; + } else { + if (prefixFilter == null) { + mustMatchFilter = regexFilter; + } else if (regexFilter == null) { + mustMatchFilter = prefixFilter; + } else { + mustMatchFilter = new SolrMetricManager.OrFilter(prefixFilter, regexFilter); + } } return mustMatchFilter; } + private MetricUtils.PropertyFilter parsePropertyFilter(SolrQueryRequest req) { + String[] props = req.getParams().getParams(PROPERTY_PARAM); + if (props == null || props.length == 0) { + return MetricUtils.PropertyFilter.ALL; + } + final Set filter = new HashSet<>(); + for (String prop : props) { + if (prop != null && !prop.trim().isEmpty()) { + filter.add(prop.trim()); + } + } + if (filter.isEmpty()) { + return MetricUtils.PropertyFilter.ALL; + } else { + return (name) -> filter.contains(name); + } + } + private Set parseRegistries(SolrQueryRequest req) { - String[] groupStr = req.getParams().getParams("group"); - String[] registryStr = req.getParams().getParams("registry"); + String[] groupStr = req.getParams().getParams(GROUP_PARAM); + String[] registryStr = req.getParams().getParams(REGISTRY_PARAM); if ((groupStr == null || groupStr.length == 0) && (registryStr == null || registryStr.length == 0)) { // return all registries return 
container.getMetricManager().registryNames(); @@ -118,7 +165,7 @@ private Set parseRegistries(SolrQueryRequest req) { for (String g : groupStr) { List split = StrUtils.splitSmart(g, ','); for (String s : split) { - if (s.trim().equals("all")) { + if (s.trim().equals(ALL)) { allRegistries = true; break; } @@ -137,7 +184,7 @@ private Set parseRegistries(SolrQueryRequest req) { for (String r : registryStr) { List split = StrUtils.splitSmart(r, ','); for (String s : split) { - if (s.trim().equals("all")) { + if (s.trim().equals(ALL)) { allRegistries = true; break; } @@ -161,7 +208,7 @@ private Set parseRegistries(SolrQueryRequest req) { } private List parseMetricTypes(SolrQueryRequest req) { - String[] typeStr = req.getParams().getParams("type"); + String[] typeStr = req.getParams().getParams(TYPE_PARAM); List types = Collections.emptyList(); if (typeStr != null && typeStr.length > 0) { types = new ArrayList<>(); @@ -176,7 +223,8 @@ private List parseMetricTypes(SolrQueryRequest req) { metricTypes = types.stream().map(String::trim).map(MetricType::valueOf).collect(Collectors.toList()); } } catch (IllegalArgumentException e) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid metric type in: " + types + " specified. Must be one of (all, meter, timer, histogram, counter, gauge)", e); + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid metric type in: " + types + + " specified. Must be one of " + MetricType.SUPPORTED_TYPES_MSG, e); } return metricTypes; } @@ -199,6 +247,8 @@ enum MetricType { gauge(Gauge.class), all(null); + public static final String SUPPORTED_TYPES_MSG = EnumSet.allOf(MetricType.class).toString(); + private final Class klass; MetricType(Class klass) { diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java index a096e7983601..8bdc478788a8 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java @@ -16,14 +16,12 @@ */ package org.apache.solr.handler.admin; -import java.net.URL; -import java.util.ArrayList; import java.util.Map; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; @@ -48,13 +46,13 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw private static SimpleOrderedMap getSolrInfoBeans( SolrCore core, boolean stats ) { SimpleOrderedMap list = new SimpleOrderedMap<>(); - for (SolrInfoMBean.Category cat : SolrInfoMBean.Category.values()) + for (SolrInfoBean.Category cat : SolrInfoBean.Category.values()) { SimpleOrderedMap category = new SimpleOrderedMap<>(); list.add( cat.name(), category ); - Map reg = core.getInfoRegistry(); - for (Map.Entry entry : reg.entrySet()) { - SolrInfoMBean m = entry.getValue(); + Map reg = core.getInfoRegistry(); + for (Map.Entry entry : reg.entrySet()) { + SolrInfoBean m = entry.getValue(); if (m.getCategory() != cat) continue; String na = "Not Declared"; @@ -62,21 +60,10 @@ private static SimpleOrderedMap getSolrInfoBeans( SolrCore core, boolean category.add( entry.getKey(), info ); info.add( NAME, (m.getName() !=null ? 
m.getName() : na) ); - info.add( "version", (m.getVersion() !=null ? m.getVersion() : na) ); info.add( "description", (m.getDescription()!=null ? m.getDescription() : na) ); - info.add( "source", (m.getSource() !=null ? m.getSource() : na) ); - URL[] urls = m.getDocs(); - if ((urls != null) && (urls.length > 0)) { - ArrayList docs = new ArrayList<>(urls.length); - for( URL u : urls ) { - docs.add( u.toExternalForm() ); - } - info.add( "docs", docs ); - } - - if( stats ) { - info.add( "stats", m.getStatistics() ); + if (stats) { + info.add( "stats", m.getMetricsSnapshot()); } } } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java b/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java index d370beff4f23..0c2c9039ddec 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java @@ -196,7 +196,7 @@ public void execute(CallInfo it) throws Exception { .getNewestSearcher(false); SolrIndexSearcher searcher = searchHolder.get(); try { - log.debug(core.getCoreDescriptor().getCoreContainer() + log.debug(core.getCoreContainer() .getZkController().getNodeName() + " to replicate " + searcher.search(new MatchAllDocsQuery(), 1).totalHits diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java b/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java index 584a7ca7cd1b..dd4ae38bb70e 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java @@ -58,7 +58,7 @@ public void execute(CallInfo it) throws Exception { try (SolrCore core = it.handler.coreContainer.getCore(cname)) { if (core != null) { - syncStrategy = new SyncStrategy(core.getCoreDescriptor().getCoreContainer()); + syncStrategy = new SyncStrategy(core.getCoreContainer()); Map props = new HashMap<>(); props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl()); @@ -73,7 +73,7 @@ public void execute(CallInfo it) throws Exception { .getNewestSearcher(false); SolrIndexSearcher searcher = searchHolder.get(); try { - log.debug(core.getCoreDescriptor().getCoreContainer() + log.debug(core.getCoreContainer() .getZkController().getNodeName() + " synched " + searcher.search(new MatchAllDocsQuery(), 1).totalHits); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java index 805a6906d1ce..02577f117624 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java @@ -127,7 +127,7 @@ public static Set initHidden(SolrParams invariants) { public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws InterruptedException, KeeperException, IOException { - CoreContainer coreContainer = req.getCore().getCoreDescriptor().getCoreContainer(); + CoreContainer coreContainer = req.getCore().getCoreContainer(); if (coreContainer.isZooKeeperAware()) { showFromZooKeeper(req, rsp, coreContainer); } else { diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java index f5f28c53205c..4faa466f4eec 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java +++ 
b/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java @@ -20,7 +20,7 @@ import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.client.solrj.impl.XMLResponseParser; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.util.ContentStream; @@ -30,10 +30,7 @@ import org.apache.solr.response.SolrQueryResponse; import java.io.StringReader; -import java.net.URL; import java.text.NumberFormat; -import java.util.ArrayList; -import java.util.List; import java.util.Locale; import java.util.Set; import java.util.Map; @@ -117,7 +114,7 @@ protected NamedList>> getMBeanInfo(SolrQueryRequest String[] requestedCats = req.getParams().getParams("cat"); if (null == requestedCats || 0 == requestedCats.length) { - for (SolrInfoMBean.Category cat : SolrInfoMBean.Category.values()) { + for (SolrInfoBean.Category cat : SolrInfoBean.Category.values()) { cats.add(cat.name(), new SimpleOrderedMap>()); } } else { @@ -128,39 +125,27 @@ protected NamedList>> getMBeanInfo(SolrQueryRequest Set requestedKeys = arrayToSet(req.getParams().getParams("key")); - Map reg = req.getCore().getInfoRegistry(); - for (Map.Entry entry : reg.entrySet()) { + Map reg = req.getCore().getInfoRegistry(); + for (Map.Entry entry : reg.entrySet()) { addMBean(req, cats, requestedKeys, entry.getKey(),entry.getValue()); } - for (SolrInfoMBean infoMBean : req.getCore().getCoreDescriptor().getCoreContainer().getResourceLoader().getInfoMBeans()) { + for (SolrInfoBean infoMBean : req.getCore().getCoreContainer().getResourceLoader().getInfoMBeans()) { addMBean(req,cats,requestedKeys,infoMBean.getName(),infoMBean); } return cats; } - private void addMBean(SolrQueryRequest req, NamedList>> cats, Set requestedKeys, String key, SolrInfoMBean m) { + private void addMBean(SolrQueryRequest req, NamedList>> cats, Set requestedKeys, String key, SolrInfoBean m) { if ( ! 
( requestedKeys.isEmpty() || requestedKeys.contains(key) ) ) return; NamedList> catInfo = cats.get(m.getCategory().name()); if ( null == catInfo ) return; NamedList mBeanInfo = new SimpleOrderedMap<>(); mBeanInfo.add("class", m.getName()); - mBeanInfo.add("version", m.getVersion()); mBeanInfo.add("description", m.getDescription()); - mBeanInfo.add("src", m.getSource()); - - // Use an external form - URL[] urls = m.getDocs(); - if(urls!=null) { - List docs = new ArrayList<>(urls.length); - for(URL url : urls) { - docs.add(url.toExternalForm()); - } - mBeanInfo.add("docs", docs); - } if (req.getParams().getFieldBool(key, "stats", false)) - mBeanInfo.add("stats", m.getStatistics()); + mBeanInfo.add("stats", m.getMetricsSnapshot()); catInfo.add(key, mBeanInfo); } @@ -246,6 +231,9 @@ public NamedList diffNamedList(NamedList ref, NamedList now) { } public Object diffObject(Object ref, Object now) { + if (now instanceof Map) { + now = new NamedList((Map)now); + } if(ref instanceof NamedList) { return diffNamedList((NamedList)ref, (NamedList)now); } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java index fc1679ff896f..8a2786c8d10b 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java @@ -16,10 +16,6 @@ */ package org.apache.solr.handler.admin; -import java.beans.BeanInfo; -import java.beans.IntrospectionException; -import java.beans.Introspector; -import java.beans.PropertyDescriptor; import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -27,23 +23,20 @@ import java.lang.invoke.MethodHandles; import java.lang.management.ManagementFactory; import java.lang.management.OperatingSystemMXBean; -import java.lang.management.PlatformManagedObject; import java.lang.management.RuntimeMXBean; -import java.lang.reflect.InvocationTargetException; import java.net.InetAddress; import java.nio.charset.Charset; import java.text.DecimalFormat; import java.text.DecimalFormatSymbols; -import java.util.Arrays; import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.Locale; +import com.codahale.metrics.Gauge; import org.apache.commons.io.IOUtils; import org.apache.lucene.LucenePackage; import org.apache.lucene.util.Constants; -import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.core.CoreContainer; import org.apache.solr.core.SolrCore; @@ -53,6 +46,7 @@ import org.apache.solr.schema.IndexSchema; import org.apache.solr.util.RTimer; import org.apache.solr.util.RedactionUtils; +import org.apache.solr.util.stats.MetricUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -154,7 +148,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw private CoreContainer getCoreContainer(SolrQueryRequest req, SolrCore core) { CoreContainer coreContainer; if (core != null) { - coreContainer = req.getCore().getCoreDescriptor().getCoreContainer(); + coreContainer = req.getCore().getCoreContainer(); } else { coreContainer = cc; } @@ -207,29 +201,13 @@ public static SimpleOrderedMap getSystemInfo() { OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean(); info.add(NAME, os.getName()); // add at least this one - try { - // add remaining ones dynamically using Java Beans API - addMXBeanProperties(os, OperatingSystemMXBean.class, info); - } 
catch (IntrospectionException | ReflectiveOperationException e) { - log.warn("Unable to fetch properties of OperatingSystemMXBean.", e); - } - - // There are some additional beans we want to add (not available on all JVMs): - for (String clazz : Arrays.asList( - "com.sun.management.OperatingSystemMXBean", - "com.sun.management.UnixOperatingSystemMXBean", - "com.ibm.lang.management.OperatingSystemMXBean" - )) { - try { - final Class intf = Class.forName(clazz) - .asSubclass(PlatformManagedObject.class); - addMXBeanProperties(os, intf, info); - } catch (ClassNotFoundException e) { - // ignore - } catch (IntrospectionException | ReflectiveOperationException e) { - log.warn("Unable to fetch properties of JVM-specific OperatingSystemMXBean.", e); + // add remaining ones dynamically using Java Beans API + // also those from JVM implementation-specific classes + MetricUtils.addMXBeanMetrics(os, MetricUtils.OS_MXBEAN_CLASSES, null, (name, metric) -> { + if (info.get(name) == null) { + info.add(name, ((Gauge) metric).getValue()); } - } + }); // Try some command line things: try { @@ -243,34 +221,6 @@ public static SimpleOrderedMap getSystemInfo() { return info; } - /** - * Add all bean properties of a {@link PlatformManagedObject} to the given {@link NamedList}. - *

    - * If you are running a OpenJDK/Oracle JVM, there are nice properties in: - * {@code com.sun.management.UnixOperatingSystemMXBean} and - * {@code com.sun.management.OperatingSystemMXBean} - */ - static void addMXBeanProperties(T obj, Class intf, NamedList info) - throws IntrospectionException, ReflectiveOperationException { - if (intf.isInstance(obj)) { - final BeanInfo beanInfo = Introspector.getBeanInfo(intf, intf.getSuperclass(), Introspector.IGNORE_ALL_BEANINFO); - for (final PropertyDescriptor desc : beanInfo.getPropertyDescriptors()) { - final String name = desc.getName(); - if (info.get(name) == null) { - try { - final Object v = desc.getReadMethod().invoke(obj); - if(v != null) { - info.add(name, v); - } - } catch (InvocationTargetException ite) { - // ignore (some properties throw UOE) - } - } - } - } - } - - /** * Utility function to execute a function */ diff --git a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java b/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java index be2173339ea4..1f398a981f3e 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java @@ -17,7 +17,6 @@ package org.apache.solr.handler.component; import java.io.IOException; -import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -161,7 +160,7 @@ private void doDebugTrack(ResponseBuilder rb) { @SuppressForbidden(reason = "Need currentTimeMillis, only used for naming") private String generateRid(ResponseBuilder rb) { - String hostName = rb.req.getCore().getCoreDescriptor().getCoreContainer().getHostName(); + String hostName = rb.req.getCore().getCoreContainer().getHostName(); return hostName + "-" + rb.req.getCore().getName() + "-" + System.currentTimeMillis() + "-" + ridCounter.getAndIncrement(); } @@ -380,7 +379,7 @@ protected Object merge(Object source, Object dest, Set exclude) { ///////////////////////////////////////////// - /// SolrInfoMBean + /// SolrInfoBean //////////////////////////////////////////// @Override @@ -392,9 +391,4 @@ public String getDescription() { public Category getCategory() { return Category.OTHER; } - - @Override - public URL[] getDocs() { - return null; - } } diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java index 656ac7113f0b..2519a47969a1 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java @@ -17,8 +17,6 @@ package org.apache.solr.handler.component; import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -764,7 +762,7 @@ private Query getGroupQuery(String fname, //////////////////////////////////////////// - /// SolrInfoMBean + /// SolrInfoBean //////////////////////////////////////////// @Override @@ -777,17 +775,6 @@ public Category getCategory() { return Category.QUERY; } - @Override - public URL[] getDocs() { - try { - return new URL[]{ - new URL("http://wiki.apache.org/solr/ExpandComponent") - }; - } catch (MalformedURLException e) { - throw new RuntimeException(e); - } - } - // this reader alters the content of the given reader so it should not // delegate the caching stuff private static class ReaderWrapper extends 
FilterLeafReader { diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java index 66b9ab8d7131..e1a6bc4f15b4 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -35,6 +34,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; +import org.apache.solr.client.solrj.util.ClientUtils; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.params.CommonParams; @@ -375,7 +375,7 @@ public int distributedProcess(ResponseBuilder rb) throws IOException { // add terms into the original facet.field command // do it via parameter reference to avoid another layer of encoding. - String termsKeyEncoded = QueryParsing.encodeLocalParamVal(termsKey); + String termsKeyEncoded = ClientUtils.encodeLocalParamVal(termsKey); if (dff.localParams != null) { facetCommand = commandPrefix + termsKeyEncoded + " " + dff.facetStr.substring(2); @@ -1212,7 +1212,7 @@ static Number num(Long val) { ///////////////////////////////////////////// - /// SolrInfoMBean + /// SolrInfoBean //////////////////////////////////////////// @Override @@ -1225,11 +1225,6 @@ public Category getCategory() { return Category.QUERY; } - @Override - public URL[] getDocs() { - return null; - } - /** * This class is used exclusively for merging results from each shard * in a distributed facet request. 
It plays no role in the computation diff --git a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java index d147be2fa73c..cc5211b32929 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java @@ -17,7 +17,6 @@ package org.apache.solr.handler.component; import java.io.IOException; -import java.net.URL; import java.util.Collections; import java.util.List; import java.util.Map; @@ -266,7 +265,7 @@ public void finishStage(ResponseBuilder rb) { } //////////////////////////////////////////// - /// SolrInfoMBean + /// SolrInfoBean //////////////////////////////////////////// @Override @@ -278,9 +277,4 @@ public String getDescription() { public Category getCategory() { return Category.HIGHLIGHTER; } - - @Override - public URL[] getDocs() { - return null; - } } diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java index 40e17a940524..8c0a9cb04742 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java @@ -279,7 +279,7 @@ public void prepDistributed(ResponseBuilder rb) { Map slices = null; CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor(); CloudDescriptor cloudDescriptor = coreDescriptor.getCloudDescriptor(); - ZkController zkController = coreDescriptor.getCoreContainer().getZkController(); + ZkController zkController = req.getCore().getCoreContainer().getZkController(); final ReplicaListTransformer replicaListTransformer = httpShardHandlerFactory.getReplicaListTransformer(req); diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java index 4262c20658c2..e3787cdf1ca8 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java +++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java @@ -36,7 +36,7 @@ import org.apache.solr.common.util.URLUtil; import org.apache.solr.core.CoreDescriptor; import org.apache.solr.core.PluginInfo; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.update.UpdateShardHandlerConfig; @@ -320,7 +320,7 @@ protected ReplicaListTransformer getReplicaListTransformer(final SolrQueryReques if (params.getBool(CommonParams.PREFER_LOCAL_SHARDS, false)) { final CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor(); - final ZkController zkController = coreDescriptor.getCoreContainer().getZkController(); + final ZkController zkController = req.getCore().getCoreContainer().getZkController(); final String preferredHostAddress = (zkController != null) ? 
zkController.getBaseUrl() : null; if (preferredHostAddress == null) { log.warn("Couldn't determine current host address to prefer local shards"); @@ -373,10 +373,10 @@ private String buildUrl(String url) { @Override public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { - String expandedScope = SolrMetricManager.mkName(scope, SolrInfoMBean.Category.QUERY.name()); + String expandedScope = SolrMetricManager.mkName(scope, SolrInfoBean.Category.QUERY.name()); clientConnectionManager.initializeMetrics(manager, registry, expandedScope); httpRequestExecutor.initializeMetrics(manager, registry, expandedScope); - commExecutor = MetricUtils.instrumentedExecutorService(commExecutor, + commExecutor = MetricUtils.instrumentedExecutorService(commExecutor, null, manager.registry(registry), SolrMetricManager.mkName("httpShardExecutor", expandedScope, "threadPool")); } diff --git a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java index ffb585889077..fd9d37d4aad7 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -414,7 +413,7 @@ NamedList getMoreLikeThese(ResponseBuilder rb, } // /////////////////////////////////////////// - // / SolrInfoMBean + // / SolrInfoBean // ////////////////////////////////////////// @Override @@ -426,9 +425,4 @@ public String getDescription() { public Category getCategory() { return Category.QUERY; } - - @Override - public URL[] getDocs() { - return null; - } } diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java index 49eadfca84fc..6369fde73527 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java @@ -20,7 +20,6 @@ import java.io.PrintWriter; import java.io.StringWriter; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -186,11 +185,6 @@ public void prepare(ResponseBuilder rb) throws IOException } rb.setSortSpec( parser.getSortSpec(true) ); - for (SchemaField sf:rb.getSortSpec().getSchemaFields()) { - if (sf != null && sf.getType().isPointField() && !sf.hasDocValues()) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"Can't sort on a point field without docValues"); - } - } rb.setQparser(parser); final String cursorStr = rb.req.getParams().get(CursorMarkParams.CURSOR_MARK_PARAM); @@ -263,24 +257,19 @@ protected void prepareGrouping(ResponseBuilder rb) throws IOException { String withinGroupSortStr = params.get(GroupParams.GROUP_SORT); //TODO: move weighting of sort final SortSpec withinGroupSortSpec; - - int withinGroupOffset = params.getInt(GroupParams.GROUP_OFFSET, 0); - int withinGroupCount = params.getInt(GroupParams.GROUP_LIMIT, 1); - if (withinGroupSortStr != null) { SortSpec parsedWithinGroupSortSpec = SortSpecParsing.parseSortSpec(withinGroupSortStr, req); withinGroupSortSpec = searcher.weightSortSpec(parsedWithinGroupSortSpec, Sort.RELEVANCE); - - 
withinGroupSortSpec.setOffset(withinGroupOffset); - withinGroupSortSpec.setCount(withinGroupCount); - } else { withinGroupSortSpec = new SortSpec( groupSortSpec.getSort(), groupSortSpec.getSchemaFields(), - withinGroupCount, - withinGroupOffset); + groupSortSpec.getCount(), + groupSortSpec.getOffset()); } + withinGroupSortSpec.setOffset(params.getInt(GroupParams.GROUP_OFFSET, 0)); + withinGroupSortSpec.setCount(params.getInt(GroupParams.GROUP_LIMIT, 1)); + groupingSpec.setWithinGroupSortSpec(withinGroupSortSpec); groupingSpec.setGroupSortSpec(groupSortSpec); @@ -474,7 +463,7 @@ public void process(ResponseBuilder rb) throws IOException for (String query : groupingSpec.getQueries()) { secondPhaseBuilder.addCommandField(new Builder() .setDocsToCollect(docsToCollect) - .setSort(groupingSpec.getGroupSort()) + .setSortSpec(groupingSpec.getGroupSortSpec()) .setQuery(query, rb.req) .setDocSet(searcher) .build() @@ -1383,7 +1372,7 @@ protected void returnFields(ResponseBuilder rb, ShardRequest sreq) { } ///////////////////////////////////////////// - /// SolrInfoMBean + /// SolrInfoBean //////////////////////////////////////////// @Override @@ -1396,11 +1385,6 @@ public Category getCategory() { return Category.QUERY; } - @Override - public URL[] getDocs() { - return null; - } - /** * Fake scorer for a single document * diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java index c12902e83b52..4fe1d7163241 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java @@ -24,8 +24,6 @@ import java.io.IOException; import java.io.InputStream; import java.lang.invoke.MethodHandles; -import java.net.MalformedURLException; -import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -219,7 +217,7 @@ public void inform(SolrCore core) { boolean exists = false; // check if using ZooKeeper - ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = core.getCoreContainer().getZkController(); if (zkController != null) { // TODO : shouldn't have to keep reading the config name when it has been read before exists = zkController.configFileExists(zkController.getZkStateReader().readConfigName(core.getCoreDescriptor().getCloudDescriptor().getCollectionName()), f); @@ -274,7 +272,7 @@ Map getElevationMap(IndexReader reader, SolrCore core) thr Config cfg; - ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = core.getCoreContainer().getZkController(); if (zkController != null) { cfg = new Config(core.getResourceLoader(), f, null, null); } else { @@ -597,7 +595,7 @@ public void process(ResponseBuilder rb) throws IOException { } //--------------------------------------------------------------------------------- - // SolrInfoMBean + // SolrInfoBean //--------------------------------------------------------------------------------- @Override @@ -605,16 +603,6 @@ public String getDescription() { return "Query Boosting -- boost particular documents for a given query"; } - @Override - public URL[] getDocs() { - try { - return new URL[]{ - new URL("http://wiki.apache.org/solr/QueryElevationComponent") - }; - } catch (MalformedURLException e) { - throw new RuntimeException(e); - } - } class 
ElevationComparatorSource extends FieldComparatorSource { private QueryElevationComponent.ElevationObj elevations; private SentinelIntSet ordSet; //the key half of the map diff --git a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java index 882decb16271..c0ceddb8d5f5 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -140,7 +139,7 @@ public void process(ResponseBuilder rb) throws IOException .getNewestSearcher(false); SolrIndexSearcher searcher = searchHolder.get(); try { - log.debug(req.getCore().getCoreDescriptor() + log.debug(req.getCore() .getCoreContainer().getZkController().getNodeName() + " min count to sync to (from most recent searcher view) " + searcher.search(new MatchAllDocsQuery(), 1).totalHits); @@ -761,7 +760,7 @@ public static SolrDocument toSolrDoc(SolrInputDocument sdoc, IndexSchema schema) out.add(f); } } else { - log.debug("Don't know how to handle field " + f); + log.debug("Don't know how to handle field {}", f); } } @@ -789,7 +788,7 @@ public int createSubRequests(ResponseBuilder rb) throws IOException { // TODO: handle collection=...? - ZkController zkController = rb.req.getCore().getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = rb.req.getCore().getCoreContainer().getZkController(); // if shards=... then use that if (zkController != null && params.get(ShardParams.SHARDS) == null) { @@ -927,7 +926,7 @@ private void addDocListToResponse(final ResponseBuilder rb, final SolrDocumentLi //////////////////////////////////////////// - /// SolrInfoMBean + /// SolrInfoBean //////////////////////////////////////////// @Override @@ -940,13 +939,6 @@ public Category getCategory() { return Category.QUERY; } - @Override - public URL[] getDocs() { - return null; - } - - - public void processGetFingeprint(ResponseBuilder rb) throws IOException { SolrQueryRequest req = rb.req; SolrParams params = req.getParams(); diff --git a/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java index 6ef0ee4f2378..c615c5a7ac1d 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java @@ -17,13 +17,15 @@ package org.apache.solr.handler.component; import java.io.IOException; -import java.net.URL; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Set; +import com.codahale.metrics.MetricRegistry; import org.apache.solr.common.util.NamedList; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.search.facet.FacetModule; import org.apache.solr.util.plugin.NamedListInitializedPlugin; @@ -33,12 +35,16 @@ * * @since solr 1.3 */ -public abstract class SearchComponent implements SolrInfoMBean, NamedListInitializedPlugin +public abstract class SearchComponent implements SolrInfoBean, NamedListInitializedPlugin { /** * The name given to this component in solrconfig.xml file */ private String name = this.getClass().getName(); 
+ + protected Set metricNames = new HashSet<>(); + protected MetricRegistry registry; + /** * Prepare the response. Guaranteed to be called before any SearchComponent {@link #process(org.apache.solr.handler.component.ResponseBuilder)} method. * Called for every incoming request. @@ -103,31 +109,24 @@ public String getName() { @Override public abstract String getDescription(); - @Override - public String getSource() { return null; } - - @Override - public String getVersion() { - return getClass().getPackage().getSpecificationVersion(); - } - + @Override public Category getCategory() { return Category.OTHER; } @Override - public URL[] getDocs() { - return null; // this can be overridden, but not required + public Set getMetricNames() { + return metricNames; } @Override - public NamedList getStatistics() { - return null; + public MetricRegistry getMetricRegistry() { + return registry; } public static final Map> standard_components; - ; + static { HashMap> map = new HashMap<>(); diff --git a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java b/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java index a00839b1e855..8b732da62189 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java @@ -129,7 +129,7 @@ public void inform(SolrCore core) "First/Last components only valid if you do not declare 'components'"); if (shfInfo == null) { - shardHandlerFactory = core.getCoreDescriptor().getCoreContainer().getShardHandlerFactory(); + shardHandlerFactory = core.getCoreContainer().getShardHandlerFactory(); } else { shardHandlerFactory = core.createInitInstance(shfInfo, ShardHandlerFactory.class, null, null); core.addCloseHook(new CloseHook() { @@ -185,12 +185,12 @@ private void initComponents() { dbgCmp = (DebugComponent) comp; } else { components.add(comp); - log.debug("Adding component:"+comp); + log.debug("Adding component:{}", comp); } } if (makeDebugLast == true && dbgCmp != null){ components.add(dbgCmp); - log.debug("Adding debug component:" + dbgCmp); + log.debug("Adding debug component:{}", dbgCmp); } this.components = components; } @@ -211,7 +211,7 @@ public List getComponents() { private ShardHandler getAndPrepShardHandler(SolrQueryRequest req, ResponseBuilder rb) { ShardHandler shardHandler = null; - CoreContainer cc = req.getCore().getCoreDescriptor().getCoreContainer(); + CoreContainer cc = req.getCore().getCoreContainer(); boolean isZkAware = cc.isZooKeeperAware(); rb.isDistrib = req.getParams().getBool(DISTRIB, isZkAware); if (!rb.isDistrib) { diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java b/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java index 4ad882c8e99f..656372a4d72a 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java @@ -32,16 +32,15 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.shape.Shape; import org.apache.lucene.spatial.prefix.HeatmapFacetCounter; import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.util.Bits; +import org.apache.lucene.util.FixedBitSet; import 
org.apache.solr.common.SolrException; -import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.FacetParams; +import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SimpleOrderedMap; @@ -51,10 +50,13 @@ import org.apache.solr.schema.SchemaField; import org.apache.solr.schema.SpatialRecursivePrefixTreeFieldType; import org.apache.solr.search.BitDocSet; +import org.apache.solr.search.DocIterator; import org.apache.solr.search.DocSet; -import org.apache.solr.search.QueryParsing; +import org.apache.solr.search.SolrIndexSearcher; import org.apache.solr.util.DistanceUnits; import org.apache.solr.util.SpatialUtils; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.shape.Shape; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -134,32 +136,13 @@ public static NamedList getHeatmapForField(String fieldKey, String field gridLevel = strategy.getGrid().getLevelForDistance(distErr); } - // Turn docSet into Bits - Bits topAcceptDocs; - if (docSet instanceof BitDocSet) { - BitDocSet set = (BitDocSet) docSet; - topAcceptDocs = set.getBits(); - } else { - topAcceptDocs = new Bits() { - @Override - public boolean get(int index) { - return docSet.exists(index); - } - - @Override - public int length() { - return rb.req.getSearcher().maxDoc(); - } - }; - } - //Compute! final HeatmapFacetCounter.Heatmap heatmap; try { heatmap = HeatmapFacetCounter.calcFacets( strategy, rb.req.getSearcher().getTopReaderContext(), - topAcceptDocs, + getTopAcceptDocs(docSet, rb.req.getSearcher()), // turn DocSet into Bits boundsShape, gridLevel, params.getFieldInt(fieldKey, FacetParams.FACET_HEATMAP_MAX_CELLS, 100_000) // will throw if exceeded @@ -190,6 +173,23 @@ public int length() { return result; } + private static Bits getTopAcceptDocs(DocSet docSet, SolrIndexSearcher searcher) throws IOException { + if (searcher.getLiveDocs() == docSet) { + return null; // means match everything (all live docs). This can speedup things a lot. + } else if (docSet.size() == 0) { + return new Bits.MatchNoBits(searcher.maxDoc()); // can speedup things a lot + } else if (docSet instanceof BitDocSet) { + return ((BitDocSet) docSet).getBits(); + } else { + // TODO DocSetBase.calcBits ought to be at DocSet level? + FixedBitSet bits = new FixedBitSet(searcher.maxDoc()); + for (DocIterator iter = docSet.iterator(); iter.hasNext();) { + bits.set(iter.nextDoc()); + } + return bits; + } + } + private static void formatCountsAndAddToNL(String fieldKey, ResponseBuilder rb, SolrParams params, int columns, int rows, int[] counts, NamedList result) { final String format = params.getFieldParam(fieldKey, FacetParams.FACET_HEATMAP_FORMAT, FORMAT_INTS2D); @@ -290,38 +290,20 @@ public static LinkedHashMap distribParse(SolrParams params, * {@link org.apache.solr.handler.component.SearchComponent#modifyRequest(ResponseBuilder, SearchComponent, ShardRequest)}. */ public static void distribModifyRequest(ShardRequest sreq, LinkedHashMap heatmapFacets) { // Set the format to PNG because it's compressed and it's the only format we have code to read at the moment. - // Changing a param is sadly tricky because field-specific params can show up as local-params (highest precedence) - // or as f.key.facet.heatmap.whatever. Ugh. So we re-write the facet.heatmap list with the local-params - // moved out to the "f.key." 
prefix, but we need to keep the key local-param because that's the only way to - // set an output key. This approach means we only need to know about the parameter we're changing, not of - // all possible heatmap params. + // We re-write the facet.heatmap list with PNG format in local-params where it has highest precedence. //Remove existing heatmap field param vals; we will rewrite sreq.params.remove(FacetParams.FACET_HEATMAP); - for (Map.Entry entry : heatmapFacets.entrySet()) { - final String key = entry.getKey(); - final HeatmapFacet facet = entry.getValue(); + for (HeatmapFacet facet : heatmapFacets.values()) { //add heatmap field param - if (!key.equals(facet.facetOn)) { - sreq.params.add(FacetParams.FACET_HEATMAP, - "{!" + CommonParams.OUTPUT_KEY + "=" + QueryParsing.encodeLocalParamVal(key) + "}" + facet.facetOn); - } else { - sreq.params.add(FacetParams.FACET_HEATMAP, facet.facetOn); - } - // Turn local-params into top-level f.key.param=value style params + ModifiableSolrParams newLocalParams = new ModifiableSolrParams(); if (facet.localParams != null) { - final Iterator localNameIter = facet.localParams.getParameterNamesIterator(); - while (localNameIter.hasNext()) { - String pname = localNameIter.next(); - if (!pname.startsWith(FacetParams.FACET_HEATMAP)) { - continue; // could be 'key', or 'v' even - } - String pval = facet.localParams.get(pname); - sreq.params.set("f." + key + "." + pname, pval); - } + newLocalParams.add(facet.localParams); } // Set format to PNG; it's the only one we parse - sreq.params.set("f." + key + "." + FacetParams.FACET_HEATMAP_FORMAT, FORMAT_PNG); + newLocalParams.set(FacetParams.FACET_HEATMAP_FORMAT, FORMAT_PNG); + sreq.params.add(FacetParams.FACET_HEATMAP, + newLocalParams.toLocalParamsString() + facet.facetOn); } } diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java index 2f805f45d029..4e3cd125c27e 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java @@ -853,7 +853,7 @@ public Map getSpellCheckers() { } // /////////////////////////////////////////// - // / SolrInfoMBean + // / SolrInfoBean // ////////////////////////////////////////// @Override diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java index 6a6e9bef0d6c..8ecd51c523a2 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java @@ -160,7 +160,7 @@ public static NamedList> unwrapStats(NamedList suggesters = new ConcurrentHashMap<>(); - + /** Container for various labels used in the responses generated by this component */ private static class SuggesterResultLabels { static final String SUGGEST = "suggest"; @@ -345,16 +348,18 @@ public String getDescription() { } @Override - public NamedList getStatistics() { - NamedList stats = new SimpleOrderedMap<>(); - stats.add("totalSizeInBytes", String.valueOf(ramBytesUsed())); - for (Map.Entry entry : suggesters.entrySet()) { - SolrSuggester suggester = entry.getValue(); - stats.add(entry.getKey(), suggester.toString()); - } - return stats; + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + 
manager.registerGauge(this, registryName, () -> ramBytesUsed(), true, "totalSizeInBytes", getCategory().toString(), scope); + MetricsMap suggestersMap = new MetricsMap((detailed, map) -> { + for (Map.Entry entry : suggesters.entrySet()) { + SolrSuggester suggester = entry.getValue(); + map.put(entry.getKey(), suggester.toString()); + } + }); + manager.registerGauge(this, registryName, suggestersMap, true, "suggesters", getCategory().toString(), scope); } - + @Override public long ramBytesUsed() { long sizeInBytes = 0; diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java index b05939e511c8..8a735d13f5ac 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java @@ -41,10 +41,10 @@ /** * Return TermEnum information, useful for things like auto suggest. - * + * *
      * <searchComponent name="termsComponent" class="solr.TermsComponent"/>
    - * 
    + *
      * <requestHandler name="/terms" class="solr.SearchHandler">
      *   <lst name="defaults">
      *     <bool name="terms">true</bool>
    @@ -101,8 +101,8 @@ public void process(ResponseBuilder rb) throws IOException {
     
         boolean termStats = params.getBool(TermsParams.TERMS_STATS, false);
     
    -    if(termStats) {
    -      NamedList stats = new SimpleOrderedMap();
    +    if (termStats) {
    +      NamedList stats = new SimpleOrderedMap<>();
           rb.rsp.add("indexstats", stats);
           collectStats(rb.req.getSearcher(), stats);
         }
    @@ -248,7 +248,7 @@ public void process(ResponseBuilder rb) throws IOException {
           if (sort) {
             for (CountPair item : queue) {
               if (i >= limit) break;
    -          ft.indexedToReadable(item.key, external);          
    +          ft.indexedToReadable(item.key, external);
               fieldTerms.add(external.toString(), item.val);
               i++;
             }
    @@ -335,7 +335,7 @@ public void finishStage(ResponseBuilder rb) {
         rb._termsHelper = null;
       }
     
    -  private ShardRequest createShardQuery(SolrParams params) {
    +  private static ShardRequest createShardQuery(SolrParams params) {
         ShardRequest sreq = new ShardRequest();
         sreq.purpose = ShardRequest.PURPOSE_GET_TERMS;
     
    @@ -384,11 +384,11 @@ public void parse(NamedList> terms) {
           }
     
           TermsResponse termsResponse = new TermsResponse(terms);
    -      
    +
           // loop though each field and add each term+freq to map
           for (String key : fieldmap.keySet()) {
             HashMap termmap = fieldmap.get(key);
    -        List termlist = termsResponse.getTerms(key); 
    +        List termlist = termsResponse.getTerms(key);
     
             // skip this field if there are no terms
             if (termlist == null) {
    @@ -410,7 +410,7 @@ public void parse(NamedList> terms) {
           }
         }
     
    -    public NamedList buildResponse() {
    +    public NamedList buildResponse() {
           NamedList response = new SimpleOrderedMap<>();
     
           // determine if we are going index or count sort
    @@ -459,8 +459,8 @@ public NamedList buildResponse() {
               if (tc.getFrequency() >= freqmin && tc.getFrequency() <= freqmax) {
                 if (includeTotalTermFreq) {
                   NamedList termStats = new SimpleOrderedMap<>();
    -              termStats.add("docFreq", tc.getFrequency());
    -              termStats.add("totalTermFreq", tc.getTotalTermFreq());
    +              termStats.add("df", tc.getFrequency());
    +              termStats.add("ttf", tc.getTotalTermFreq());
                   fieldterms.add(tc.getTerm(), termStats);
                 } else {
                   fieldterms.add(tc.getTerm(), num(tc.getFrequency()));
    @@ -480,7 +480,7 @@ public NamedList buildResponse() {
         }
     
    // use <int> tags for smaller facet counts (better back compatibility)
    -    private Number num(long val) {
    +    private static Number num(long val) {
           if (val < Integer.MAX_VALUE) return (int) val;
           else return val;
         }
    @@ -515,53 +515,51 @@ public TermsResponse.Term[] getCountSorted(HashMap d
         }
       }
     
    -  private void fetchTerms(SolrIndexSearcher indexSearcher,
    -                          String[] fields,
    -                          String termList,
    -                          boolean includeTotalTermFreq,
    -                          NamedList result) throws IOException {
    -
    -    String field = fields[0];
    -    FieldType fieldType = indexSearcher.getSchema().getField(field).getType();
    +  private static void fetchTerms(SolrIndexSearcher indexSearcher, String[] fields, String termList,
    +      boolean includeTotalTermFreq, NamedList result) throws IOException {
         String[] splitTerms = termList.split(",");
    -
    -    for(int i=0; i termStats = new SimpleOrderedMap<>();
    -          termStats.add("docFreq", (long) docFreq);
    -          termStats.add("totalTermFreq", totalTermFreq);
    -          termsMap.add(outTerm, termStats);
    +      TermContext[] termContexts = new TermContext[terms.length];
    +      collectTermContext(topReaderContext, termContexts, terms);
    +
    +      NamedList termsMap = new SimpleOrderedMap<>();
    +      for (int i = 0; i < terms.length; i++) {
    +        if (termContexts[i] != null) {
    +          String outTerm = fieldType.indexedToReadable(terms[i].bytes().utf8ToString());
    +          int docFreq = termContexts[i].docFreq();
    +          if (!includeTotalTermFreq) {
    +            termsMap.add(outTerm, docFreq);
    +          } else {
    +            long totalTermFreq = termContexts[i].totalTermFreq();
    +            NamedList termStats = new SimpleOrderedMap<>();
    +            termStats.add("df", (long) docFreq);
    +            termStats.add("ttf", totalTermFreq);
    +            termsMap.add(outTerm, termStats);
    +          }
             }
           }
    -    }
     
    -    result.add(field, termsMap);
    +      result.add(field, termsMap);
    +    }
       }
     
    -  private void collectTermContext(IndexReaderContext topReaderContext, TermContext[] contextArray, Term[] queryTerms)
    -      throws IOException {
    +  private static void collectTermContext(IndexReaderContext topReaderContext, TermContext[] contextArray,
    +      Term[] queryTerms) throws IOException {
         TermsEnum termsEnum = null;
         for (LeafReaderContext context : topReaderContext.leaves()) {
           final Fields fields = context.reader().fields();
    @@ -589,7 +587,7 @@ private void collectTermContext(IndexReaderContext topReaderContext, TermContext
         }
       }
     
    -  private void collectStats(SolrIndexSearcher searcher, NamedList stats) {
    +  private static void collectStats(SolrIndexSearcher searcher, NamedList stats) {
         int numDocs = searcher.getTopReaderContext().reader().numDocs();
         stats.add("numDocs", Long.valueOf(numDocs));
       }
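
The rewritten fetchTerms builds a TermContext per requested term across all index segments and reports the counts under the shortened "df" (document frequency) and "ttf" (total term frequency) keys. As a rough standalone sketch of that collection step — not part of this patch, and with the index path, field name and term value being made-up placeholders — plain Lucene APIs can do the same thing:

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.store.FSDirectory;

public class TermStatsSketch {
  public static void main(String[] args) throws Exception {
    // "/path/to/index", "name" and "foo" are placeholders for this sketch.
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      IndexReaderContext topReaderContext = reader.getContext();
      Term term = new Term("name", "foo");
      // TermContext.build walks every leaf reader once and accumulates per-segment
      // statistics, the same idea as TermsComponent#collectTermContext above.
      TermContext termContext = TermContext.build(topReaderContext, term);
      System.out.println("df="  + termContext.docFreq());        // document frequency
      System.out.println("ttf=" + termContext.totalTermFreq());  // total term frequency
    }
  }
}
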
    diff --git a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
    index 24304d0a1b63..7e56ee44e58c 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
    @@ -128,40 +128,58 @@ public void init(PluginInfo info) {
     
         // Load the fragmenters
         SolrFragmenter frag = solrCore.initPlugins(info.getChildren("fragmenter") , fragmenters,SolrFragmenter.class,null);
    -    if (frag == null) frag = new GapFragmenter();
    +    if (frag == null) {
    +      frag = new GapFragmenter();
    +      solrCore.initDefaultPlugin(frag, SolrFragmenter.class);
    +    }
         fragmenters.put("", frag);
         fragmenters.put(null, frag);
     
         // Load the formatters
         SolrFormatter fmt = solrCore.initPlugins(info.getChildren("formatter"), formatters,SolrFormatter.class,null);
    -    if (fmt == null) fmt = new HtmlFormatter();
    +    if (fmt == null) {
    +      fmt = new HtmlFormatter();
    +      solrCore.initDefaultPlugin(fmt, SolrFormatter.class);
    +    }
         formatters.put("", fmt);
         formatters.put(null, fmt);
     
         // Load the encoders
         SolrEncoder enc = solrCore.initPlugins(info.getChildren("encoder"), encoders,SolrEncoder.class,null);
    -    if (enc == null) enc = new DefaultEncoder();
    +    if (enc == null) {
    +      enc = new DefaultEncoder();
    +      solrCore.initDefaultPlugin(enc, SolrEncoder.class);
    +    }
         encoders.put("", enc);
         encoders.put(null, enc);
     
         // Load the FragListBuilders
         SolrFragListBuilder fragListBuilder = solrCore.initPlugins(info.getChildren("fragListBuilder"),
             fragListBuilders, SolrFragListBuilder.class, null );
    -    if( fragListBuilder == null ) fragListBuilder = new SimpleFragListBuilder();
    +    if( fragListBuilder == null ) {
    +      fragListBuilder = new SimpleFragListBuilder();
    +      solrCore.initDefaultPlugin(fragListBuilder, SolrFragListBuilder.class);
    +    }
         fragListBuilders.put( "", fragListBuilder );
         fragListBuilders.put( null, fragListBuilder );
     
         // Load the FragmentsBuilders
         SolrFragmentsBuilder fragsBuilder = solrCore.initPlugins(info.getChildren("fragmentsBuilder"),
             fragmentsBuilders, SolrFragmentsBuilder.class, null);
    -    if( fragsBuilder == null ) fragsBuilder = new ScoreOrderFragmentsBuilder();
    +    if( fragsBuilder == null ) {
    +      fragsBuilder = new ScoreOrderFragmentsBuilder();
    +      solrCore.initDefaultPlugin(fragsBuilder, SolrFragmentsBuilder.class);
    +    }
         fragmentsBuilders.put( "", fragsBuilder );
         fragmentsBuilders.put( null, fragsBuilder );
     
         // Load the BoundaryScanners
         SolrBoundaryScanner boundaryScanner = solrCore.initPlugins(info.getChildren("boundaryScanner"),
             boundaryScanners, SolrBoundaryScanner.class, null);
    -    if(boundaryScanner == null) boundaryScanner = new SimpleBoundaryScanner();
    +    if(boundaryScanner == null) {
    +      boundaryScanner = new SimpleBoundaryScanner();
    +      solrCore.initDefaultPlugin(boundaryScanner, SolrBoundaryScanner.class);
    +    }
         boundaryScanners.put("", boundaryScanner);
         boundaryScanners.put(null, boundaryScanner);
     
    diff --git a/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java b/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java
    index 64cb280a25af..6a11bb9018c2 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java
    @@ -30,7 +30,7 @@ public class GapFragmenter extends HighlightingPluginBase implements SolrFragmen
       @Override
       public Fragmenter getFragmenter(String fieldName, SolrParams params )
       {
    -    numRequests++;
    +    numRequests.inc();
         params = SolrParams.wrapDefaults(params, defaults);
         
         int fragsize = params.getFieldInt( fieldName, HighlightParams.FRAGSIZE, 100 );
    diff --git a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
    index f60ada82d1b6..7acaacdd03c9 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
    @@ -16,21 +16,27 @@
      */
     package org.apache.solr.highlight;
     
    -import java.net.URL;
    +import java.util.HashSet;
    +import java.util.Set;
     
    +import com.codahale.metrics.Counter;
    +import com.codahale.metrics.MetricRegistry;
     import org.apache.solr.common.params.SolrParams;
     import org.apache.solr.common.util.NamedList;
    -import org.apache.solr.common.util.SimpleOrderedMap;
    -import org.apache.solr.core.SolrInfoMBean;
    +import org.apache.solr.core.SolrInfoBean;
    +import org.apache.solr.metrics.SolrMetricManager;
    +import org.apache.solr.metrics.SolrMetricProducer;
     
     /**
      * 
      * @since solr 1.3
      */
    -public abstract class HighlightingPluginBase implements SolrInfoMBean
    +public abstract class HighlightingPluginBase implements SolrInfoBean, SolrMetricProducer
     {
    -  protected long numRequests;
    +  protected Counter numRequests;
       protected SolrParams defaults;
    +  protected Set metricNames = new HashSet<>(1);
    +  protected MetricRegistry registry;
     
       public void init(NamedList args) {
         if( args != null ) {
    @@ -50,14 +56,7 @@ public String getName() {
     
       @Override
       public abstract String getDescription();
    -  @Override
    -  public String getSource() { return null; }
    -  
    -  @Override
    -  public String getVersion() {
    -    return getClass().getPackage().getSpecificationVersion();
    -  }
    -  
    +
       @Override
       public Category getCategory()
       {
    @@ -65,15 +64,19 @@ public Category getCategory()
       }
     
       @Override
    -  public URL[] getDocs() {
    -    return null;  // this can be overridden, but not required
    +  public Set getMetricNames() {
    +    return metricNames;
    +  }
    +
    +  @Override
    +  public MetricRegistry getMetricRegistry() {
    +    return registry;
       }
     
       @Override
    -  public NamedList getStatistics() {
    -    NamedList lst = new SimpleOrderedMap<>();
    -    lst.add("requests", numRequests);
    -    return lst;
    +  public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) {
    +    registry = manager.registry(registryName);
    +    numRequests = manager.counter(this, registryName, "requests", getCategory().toString(), scope);
       }
     }
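
With this change, highlighting plugins no longer keep their own long counter; HighlightingPluginBase registers a com.codahale.metrics Counter through SolrMetricManager in initializeMetrics, and subclasses simply call numRequests.inc(). A hedged illustration of the new pattern — the class below is invented for this note, not part of the patch, and only mirrors what GapFragmenter, RegexFragmenter and friends now do:

package org.apache.solr.highlight;

import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.NullFragmenter;
import org.apache.solr.common.params.SolrParams;

public class PassThroughFragmenterExample extends HighlightingPluginBase implements SolrFragmenter {

  @Override
  public Fragmenter getFragmenter(String fieldName, SolrParams params) {
    numRequests.inc();               // Counter created by initializeMetrics, roughly <category>.<scope>.requests
    return new NullFragmenter();     // never splits the text into fragments
  }

  @Override
  public String getDescription() {
    return "Example fragmenter that returns the whole field as one fragment";
  }
}
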
     
    diff --git a/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java b/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java
    index 842d5cdbfe6c..0950c53e7ef8 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java
    @@ -29,7 +29,7 @@ public class HtmlFormatter extends HighlightingPluginBase implements SolrFormatt
       @Override
       public Formatter getFormatter(String fieldName, SolrParams params ) 
       {
    -    numRequests++;
    +    numRequests.inc();
         params = SolrParams.wrapDefaults(params, defaults);
     
         return new SimpleHTMLFormatter(
    diff --git a/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java b/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
    index b755b2d0a92b..ffefbad33ace 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
    @@ -60,7 +60,7 @@ public void init(NamedList args) {
       @Override
       public Fragmenter getFragmenter(String fieldName, SolrParams params )
       { 
    -    numRequests++;
    +    numRequests.inc();
         params = SolrParams.wrapDefaults(params, defaults);
     
         int fragsize  = params.getFieldInt(   fieldName, HighlightParams.FRAGSIZE,  LuceneRegexFragmenter.DEFAULT_FRAGMENT_SIZE );
    diff --git a/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java b/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java
    index ed5430ce1e66..7e30a9231aec 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java
    @@ -28,7 +28,7 @@ public FragListBuilder getFragListBuilder(SolrParams params) {
         // If that ever changes, it should wrap them with defaults...
         // params = SolrParams.wrapDefaults(params, defaults)
     
    -    numRequests++;
    +    numRequests.inc();
     
         return new org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder();
       }
    diff --git a/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java b/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java
    index 0b79929b35d1..0dfa16e454a7 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java
    @@ -28,7 +28,7 @@ public FragListBuilder getFragListBuilder(SolrParams params) {
         // If that ever changes, it should wrap them with defaults...
         // params = SolrParams.wrapDefaults(params, defaults)
     
    -    numRequests++;
    +    numRequests.inc();
     
         return new org.apache.lucene.search.vectorhighlight.SingleFragListBuilder();
       }
    diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java b/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java
    index 6f442f72bf1a..ddbbfdeb88b0 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java
    @@ -18,14 +18,14 @@
     
     import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
     import org.apache.solr.common.params.SolrParams;
    -import org.apache.solr.core.SolrInfoMBean;
    +import org.apache.solr.core.SolrInfoBean;
     import org.apache.solr.util.plugin.NamedListInitializedPlugin;
     
     public abstract class SolrBoundaryScanner extends HighlightingPluginBase implements
    -    SolrInfoMBean, NamedListInitializedPlugin {
    +    SolrInfoBean, NamedListInitializedPlugin {
     
       public BoundaryScanner getBoundaryScanner(String fieldName, SolrParams params){
    -    numRequests++;
    +    numRequests.inc();
         params = SolrParams.wrapDefaults(params, defaults);
     
         return get(fieldName, params);
    diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrEncoder.java b/solr/core/src/java/org/apache/solr/highlight/SolrEncoder.java
    index 9f49228805d6..7b78a06969fa 100644
    --- a/solr/core/src/java/org/apache/solr/highlight/SolrEncoder.java
    +++ b/solr/core/src/java/org/apache/solr/highlight/SolrEncoder.java
    @@ -19,10 +19,10 @@
     import org.apache.lucene.search.highlight.Encoder;
     import org.apache.solr.common.params.SolrParams;
     import org.apache.solr.common.util.NamedList;
    -import org.apache.solr.core.SolrInfoMBean;
    +import org.apache.solr.core.SolrInfoBean;
     import org.apache.solr.util.plugin.NamedListInitializedPlugin;
     
    -public interface SolrEncoder extends SolrInfoMBean, NamedListInitializedPlugin {
    +public interface SolrEncoder extends SolrInfoBean, NamedListInitializedPlugin {
     
  /** init will be called just once, immediately after creation.
   * <p>The args are user-level initialization parameters that
diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrFormatter.java b/solr/core/src/java/org/apache/solr/highlight/SolrFormatter.java
index a8f51dbcd464..1a6443e6deff 100644
--- a/solr/core/src/java/org/apache/solr/highlight/SolrFormatter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/SolrFormatter.java
@@ -19,10 +19,10 @@
 import org.apache.lucene.search.highlight.Formatter;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrInfoMBean;
+import org.apache.solr.core.SolrInfoBean;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
 
-public interface SolrFormatter extends SolrInfoMBean, NamedListInitializedPlugin {
+public interface SolrFormatter extends SolrInfoBean, NamedListInitializedPlugin {
 
   /** init will be called just once, immediately after creation.
   * <p>The args are user-level initialization parameters that
diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrFragListBuilder.java b/solr/core/src/java/org/apache/solr/highlight/SolrFragListBuilder.java
index f0c36b4d6028..87da23513b01 100644
--- a/solr/core/src/java/org/apache/solr/highlight/SolrFragListBuilder.java
+++ b/solr/core/src/java/org/apache/solr/highlight/SolrFragListBuilder.java
@@ -19,10 +19,10 @@
 import org.apache.lucene.search.vectorhighlight.FragListBuilder;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrInfoMBean;
+import org.apache.solr.core.SolrInfoBean;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
 
-public interface SolrFragListBuilder extends SolrInfoMBean, NamedListInitializedPlugin {
+public interface SolrFragListBuilder extends SolrInfoBean, NamedListInitializedPlugin {
 
   /** init will be called just once, immediately after creation.
   * <p>The args are user-level initialization parameters that
diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrFragmenter.java b/solr/core/src/java/org/apache/solr/highlight/SolrFragmenter.java
index 547506f5cf1c..98c3056993df 100644
--- a/solr/core/src/java/org/apache/solr/highlight/SolrFragmenter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/SolrFragmenter.java
@@ -19,10 +19,10 @@
 import org.apache.lucene.search.highlight.Fragmenter;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrInfoMBean;
+import org.apache.solr.core.SolrInfoBean;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
 
-public interface SolrFragmenter extends SolrInfoMBean, NamedListInitializedPlugin {
+public interface SolrFragmenter extends SolrInfoBean, NamedListInitializedPlugin {
 
   /** init will be called just once, immediately after creation.
   *
    The args are user-level initialization parameters that diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java b/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java index 78ea5a4deee5..023d55ae3914 100644 --- a/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java +++ b/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java @@ -21,11 +21,11 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.params.HighlightParams; import org.apache.solr.common.params.SolrParams; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.util.plugin.NamedListInitializedPlugin; public abstract class SolrFragmentsBuilder extends HighlightingPluginBase - implements SolrInfoMBean, NamedListInitializedPlugin { + implements SolrInfoBean, NamedListInitializedPlugin { public static final String DEFAULT_PRE_TAGS = ""; public static final String DEFAULT_POST_TAGS = ""; @@ -37,7 +37,7 @@ public abstract class SolrFragmentsBuilder extends HighlightingPluginBase * @return An appropriate {@link org.apache.lucene.search.vectorhighlight.FragmentsBuilder}. */ public FragmentsBuilder getFragmentsBuilder(SolrParams params, BoundaryScanner bs) { - numRequests++; + numRequests.inc(); params = SolrParams.wrapDefaults(params, defaults); return getFragmentsBuilder( params, getPreTags( params, null ), getPostTags( params, null ), bs ); diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java index e526c57e52a2..ade4c6bda016 100644 --- a/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java +++ b/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java @@ -24,9 +24,9 @@ import org.apache.solr.util.SolrPluginUtils; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; -import java.util.List; +import java.util.LinkedHashSet; +import java.util.Set; public abstract class SolrHighlighter { @@ -60,27 +60,19 @@ public String[] getHighlightFields(Query query, SolrQueryRequest request, String if (emptyArray(defaultFields)) { String defaultSearchField = request.getSchema().getDefaultSearchFieldName(); fields = null == defaultSearchField ? 
new String[]{} : new String[]{defaultSearchField}; - } - else { + } else { fields = defaultFields; } - } - else if (fields.length == 1) { - if (fields[0].contains("*")) { - // create a Java regular expression from the wildcard string - String fieldRegex = fields[0].replaceAll("\\*", ".*"); - Collection storedHighlightFieldNames = request.getSearcher().getDocFetcher().getStoredHighlightFieldNames(); - List storedFieldsToHighlight = new ArrayList<>(); - for (String storedFieldName: storedHighlightFieldNames) { - if (storedFieldName.matches(fieldRegex)) { - storedFieldsToHighlight.add(storedFieldName); - } - } - fields = storedFieldsToHighlight.toArray(new String[storedFieldsToHighlight.size()]); - } else { - // if there's a single request/handler value, it may be a space/comma separated list - fields = SolrPluginUtils.split(fields[0]); + } else { + Set expandedFields = new LinkedHashSet(); + Collection storedHighlightFieldNames = request.getSearcher().getDocFetcher().getStoredHighlightFieldNames(); + for (String field : fields) { + expandWildcardsInHighlightFields( + expandedFields, + storedHighlightFieldNames, + SolrPluginUtils.split(field)); } + fields = expandedFields.toArray(new String[]{}); } // Trim them now in case they haven't been yet. Not needed for all code-paths above but do it here. @@ -94,6 +86,25 @@ protected boolean emptyArray(String[] arr) { return (arr == null || arr.length == 0 || arr[0] == null || arr[0].trim().length() == 0); } + static private void expandWildcardsInHighlightFields ( + Set expandedFields, + Collection storedHighlightFieldNames, + String... fields) { + for (String field : fields) { + if (field.contains("*")) { + // create a Java regular expression from the wildcard string + String fieldRegex = field.replaceAll("\\*", ".*"); + for (String storedFieldName : storedHighlightFieldNames) { + if (storedFieldName.matches(fieldRegex)) { + expandedFields.add(storedFieldName); + } + } + } else { + expandedFields.add(field); + } + } + } + /** * Generates a list of Highlighted query fragments for each item in a list * of documents, or returns null if highlighting is disabled. diff --git a/solr/core/src/java/org/apache/solr/highlight/WeightedFragListBuilder.java b/solr/core/src/java/org/apache/solr/highlight/WeightedFragListBuilder.java index f44c0f0b430c..b97cc31c89d0 100644 --- a/solr/core/src/java/org/apache/solr/highlight/WeightedFragListBuilder.java +++ b/solr/core/src/java/org/apache/solr/highlight/WeightedFragListBuilder.java @@ -28,7 +28,7 @@ public FragListBuilder getFragListBuilder(SolrParams params) { // If that ever changes, it should wrap them with defaults... // params = SolrParams.wrapDefaults(params, defaults) - numRequests++; + numRequests.inc(); return new org.apache.lucene.search.vectorhighlight.WeightedFragListBuilder(); } diff --git a/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java b/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java index 5ff77429156f..11c8b8472c28 100644 --- a/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java +++ b/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java @@ -34,7 +34,7 @@ /** * Set's per thread context info for logging. Nested calls will use the top level parent for all context. The first * caller always owns the context until it calls {@link #clear()}. Always call {@link #setCore(SolrCore)} or - * {@link #setCoreDescriptor(CoreDescriptor)} and then {@link #clear()} in a finally block. 
+ * {@link #setCoreDescriptor(CoreContainer, CoreDescriptor)} and then {@link #clear()} in a finally block. */ public class MDCLoggingContext { // When a thread sets context and finds that the context is already set, we should noop and ignore the finally clear @@ -105,12 +105,11 @@ private static void setNodeName(String node) { public static void setCore(SolrCore core) { if (core != null) { - CoreDescriptor cd = core.getCoreDescriptor(); - setCoreDescriptor(cd); + setCoreDescriptor(core.getCoreContainer(), core.getCoreDescriptor()); } } - public static void setCoreDescriptor(CoreDescriptor cd) { + public static void setCoreDescriptor(CoreContainer coreContainer, CoreDescriptor cd) { if (cd != null) { int callDepth = CALL_DEPTH.get(); CALL_DEPTH.set(callDepth + 1); @@ -119,9 +118,8 @@ public static void setCoreDescriptor(CoreDescriptor cd) { } setCoreName(cd.getName()); - CoreContainer cc = cd.getCoreContainer(); - if (cc != null) { - ZkController zkController = cc.getZkController(); + if (coreContainer != null) { + ZkController zkController = coreContainer.getZkController(); if (zkController != null) { setNodeName(zkController.getNodeName()); } diff --git a/solr/core/src/java/org/apache/solr/metrics/AltBufferPoolMetricSet.java b/solr/core/src/java/org/apache/solr/metrics/AltBufferPoolMetricSet.java new file mode 100644 index 000000000000..f9d3a43b7dc0 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/metrics/AltBufferPoolMetricSet.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.metrics; + +import java.lang.management.BufferPoolMXBean; +import java.lang.management.ManagementFactory; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; +import com.codahale.metrics.MetricSet; + +/** + * This is an alternative implementation of {@link com.codahale.metrics.jvm.BufferPoolMetricSet} that + * doesn't need an MBean server. 
+ */ +public class AltBufferPoolMetricSet implements MetricSet { + + @Override + public Map getMetrics() { + final Map metrics = new HashMap<>(); + List pools = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class); + for (final BufferPoolMXBean pool : pools) { + String name = pool.getName(); + metrics.put(name + ".Count", (Gauge)() -> pool.getCount()); + metrics.put(name + ".MemoryUsed", (Gauge)() -> pool.getMemoryUsed()); + metrics.put(name + ".TotalCapacity", (Gauge)() -> pool.getTotalCapacity()); + } + return metrics; + } +} diff --git a/solr/core/src/java/org/apache/solr/metrics/MetricsMap.java b/solr/core/src/java/org/apache/solr/metrics/MetricsMap.java new file mode 100644 index 000000000000..f43c60b5927a --- /dev/null +++ b/solr/core/src/java/org/apache/solr/metrics/MetricsMap.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.metrics; + +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.AttributeNotFoundException; +import javax.management.DynamicMBean; +import javax.management.InvalidAttributeValueException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.ReflectionException; +import javax.management.openmbean.OpenMBeanAttributeInfoSupport; +import javax.management.openmbean.OpenType; +import javax.management.openmbean.SimpleType; +import java.lang.invoke.MethodHandles; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.function.BiConsumer; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.solr.common.SolrException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Dynamically constructed map of metrics, intentionally different from {@link com.codahale.metrics.MetricSet} + * where each metric had to be known in advance and registered separately in {@link com.codahale.metrics.MetricRegistry}. + *

Note: this awkwardly extends {@link Gauge} and not {@link Metric} because awkwardly {@link Metric} instances + * are not supported by {@link com.codahale.metrics.MetricRegistryListener} :(
+ * Note 2: values added to this metric map should belong to the list of types supported by JMX: + * {@link javax.management.openmbean.OpenType#ALLOWED_CLASSNAMES_LIST}, otherwise only their toString() + * representation will be shown in JConsole.
    + */ +public class MetricsMap implements Gauge>, DynamicMBean { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + // set to true to use cached statistics between getMBeanInfo calls to work + // around over calling getStatistics on MBeanInfos when iterating over all attributes (SOLR-6586) + private final boolean useCachedStatsBetweenGetMBeanInfoCalls = Boolean.getBoolean("useCachedStatsBetweenGetMBeanInfoCalls"); + + private BiConsumer> initializer; + private volatile Map cachedValue; + + public MetricsMap(BiConsumer> initializer) { + this.initializer = initializer; + } + + @Override + public Map getValue() { + return getValue(true); + } + + public Map getValue(boolean detailed) { + Map map = new HashMap<>(); + initializer.accept(detailed, map); + return map; + } + + public String toString() { + return getValue().toString(); + } + + @Override + public Object getAttribute(String attribute) throws AttributeNotFoundException, MBeanException, ReflectionException { + Object val; + Map stats = null; + if (useCachedStatsBetweenGetMBeanInfoCalls) { + Map cachedStats = this.cachedValue; + if (cachedStats != null) { + stats = cachedStats; + } + } + if (stats == null) { + stats = getValue(true); + } + val = stats.get(attribute); + + if (val != null) { + // It's String or one of the simple types, just return it as JMX suggests direct support for such types + for (String simpleTypeName : SimpleType.ALLOWED_CLASSNAMES_LIST) { + if (val.getClass().getName().equals(simpleTypeName)) { + return val; + } + } + // It's an arbitrary object which could be something complex and odd, return its toString, assuming that is + // a workable representation of the object + return val.toString(); + } + return null; + } + + @Override + public void setAttribute(Attribute attribute) throws AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException { + throw new UnsupportedOperationException("Operation not Supported"); + } + + @Override + public AttributeList getAttributes(String[] attributes) { + AttributeList list = new AttributeList(); + for (String attribute : attributes) { + try { + list.add(new Attribute(attribute, getAttribute(attribute))); + } catch (Exception e) { + log.warn("Could not get attribute " + attribute); + } + } + return list; + } + + @Override + public AttributeList setAttributes(AttributeList attributes) { + throw new UnsupportedOperationException("Operation not Supported"); + } + + @Override + public Object invoke(String actionName, Object[] params, String[] signature) throws MBeanException, ReflectionException { + throw new UnsupportedOperationException("Operation not Supported"); + } + + @Override + public MBeanInfo getMBeanInfo() { + ArrayList attrInfoList = new ArrayList<>(); + Map stats = getValue(true); + if (useCachedStatsBetweenGetMBeanInfoCalls) { + cachedValue = stats; + } + try { + stats.forEach((k, v) -> { + Class type = v.getClass(); + OpenType typeBox = determineType(type); + if (type.equals(String.class) || typeBox == null) { + attrInfoList.add(new MBeanAttributeInfo(k, String.class.getName(), + null, true, false, false)); + } else { + attrInfoList.add(new OpenMBeanAttributeInfoSupport( + k, k, typeBox, true, false, false)); + } + }); + } catch (Exception e) { + // don't log issue if the core is closing + if (!(SolrException.getRootCause(e) instanceof AlreadyClosedException)) + log.warn("Could not get attributes of MetricsMap: {}", this, e); + } + MBeanAttributeInfo[] attrInfoArr = attrInfoList 
+ .toArray(new MBeanAttributeInfo[attrInfoList.size()]); + return new MBeanInfo(getClass().getName(), "MetricsMap", attrInfoArr, null, null, null); + } + + private OpenType determineType(Class type) { + try { + for (Field field : SimpleType.class.getFields()) { + if (field.getType().equals(SimpleType.class)) { + SimpleType candidate = (SimpleType) field.get(SimpleType.class); + if (candidate.getTypeName().equals(type.getName())) { + return candidate; + } + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } + return null; + } +} \ No newline at end of file diff --git a/solr/core/src/java/org/apache/solr/metrics/OperatingSystemMetricSet.java b/solr/core/src/java/org/apache/solr/metrics/OperatingSystemMetricSet.java index 34ef5d1c2e67..21957eb22850 100644 --- a/solr/core/src/java/org/apache/solr/metrics/OperatingSystemMetricSet.java +++ b/solr/core/src/java/org/apache/solr/metrics/OperatingSystemMetricSet.java @@ -16,77 +16,31 @@ */ package org.apache.solr.metrics; -import javax.management.JMException; -import javax.management.MBeanAttributeInfo; -import javax.management.MBeanInfo; -import javax.management.MBeanServer; -import javax.management.ObjectName; -import java.lang.invoke.MethodHandles; +import java.lang.management.ManagementFactory; +import java.lang.management.OperatingSystemMXBean; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; -import java.util.Set; -import com.codahale.metrics.JmxAttributeGauge; import com.codahale.metrics.Metric; import com.codahale.metrics.MetricSet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.solr.util.stats.MetricUtils; /** * This is an extended replacement for {@link com.codahale.metrics.jvm.FileDescriptorRatioGauge} - * - that class uses reflection and doesn't work under Java 9. We can also get much more - * information about OS environment once we have to go through MBeanServer anyway. + * - that class uses reflection and doesn't work under Java 9. This implementation tries to retrieve + * bean properties from known implementations of {@link java.lang.management.OperatingSystemMXBean}. */ public class OperatingSystemMetricSet implements MetricSet { - private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - - /** Metric names - these correspond to known numeric MBean attributes. Depending on the OS and - * Java implementation only some of them may be actually present. 
- */ - public static final String[] METRICS = { - "AvailableProcessors", - "CommittedVirtualMemorySize", - "FreePhysicalMemorySize", - "FreeSwapSpaceSize", - "MaxFileDescriptorCount", - "OpenFileDescriptorCount", - "ProcessCpuLoad", - "ProcessCpuTime", - "SystemLoadAverage", - "TotalPhysicalMemorySize", - "TotalSwapSpaceSize" - }; - - private final MBeanServer mBeanServer; - - public OperatingSystemMetricSet(MBeanServer mBeanServer) { - this.mBeanServer = mBeanServer; - } @Override public Map getMetrics() { final Map metrics = new HashMap<>(); - - try { - final ObjectName on = new ObjectName("java.lang:type=OperatingSystem"); - // verify that it exists - MBeanInfo info = mBeanServer.getMBeanInfo(on); - // collect valid attributes - Set attributes = new HashSet<>(); - for (MBeanAttributeInfo ai : info.getAttributes()) { - attributes.add(ai.getName()); - } - for (String metric : METRICS) { - // verify that an attribute exists before attempting to add it - if (attributes.contains(metric)) { - metrics.put(metric, new JmxAttributeGauge(mBeanServer, on, metric)); - } + OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean(); + MetricUtils.addMXBeanMetrics(os, MetricUtils.OS_MXBEAN_CLASSES, null, (k, v) -> { + if (!metrics.containsKey(k)) { + metrics.put(k, v); } - } catch (JMException ignored) { - log.debug("Unable to load OperatingSystem MBean", ignored); - } - + }); return metrics; } } diff --git a/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java b/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java index 43f35352ebf4..8de053c65646 100644 --- a/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java +++ b/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java @@ -20,11 +20,12 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; +import com.codahale.metrics.MetricRegistry; import org.apache.solr.cloud.CloudDescriptor; import org.apache.solr.core.NodeConfig; import org.apache.solr.core.PluginInfo; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,7 +55,7 @@ public class SolrCoreMetricManager implements Closeable { public SolrCoreMetricManager(SolrCore core) { this.core = core; this.tag = String.valueOf(core.hashCode()); - this.metricManager = core.getCoreDescriptor().getCoreContainer().getMetricManager(); + this.metricManager = core.getCoreContainer().getMetricManager(); initCloudMode(); registryName = createRegistryName(cloudMode, collectionName, shardName, replicaName, core.getName()); leaderRegistryName = createLeaderRegistryName(cloudMode, collectionName, shardName); @@ -76,14 +77,14 @@ private void initCloudMode() { } /** - * Load reporters configured globally and specific to {@link org.apache.solr.core.SolrInfoMBean.Group#core} + * Load reporters configured globally and specific to {@link org.apache.solr.core.SolrInfoBean.Group#core} * group or with a registry name specific to this core. 
*/ public void loadReporters() { - NodeConfig nodeConfig = core.getCoreDescriptor().getCoreContainer().getConfig(); + NodeConfig nodeConfig = core.getCoreContainer().getConfig(); PluginInfo[] pluginInfos = nodeConfig.getMetricReporterPlugins(); metricManager.loadReporters(pluginInfos, core.getResourceLoader(), tag, - SolrInfoMBean.Group.core, registryName); + SolrInfoBean.Group.core, registryName); if (cloudMode) { metricManager.loadShardReporters(pluginInfos, core); } @@ -126,12 +127,26 @@ public void registerMetricProducer(String scope, SolrMetricProducer producer) { producer.initializeMetrics(metricManager, getRegistryName(), scope); } + /** + * Return the registry used by this SolrCore. + */ + public MetricRegistry getRegistry() { + if (registryName != null) { + return metricManager.registry(registryName); + } else { + return null; + } + } + /** * Closes reporters specific to this core. */ @Override public void close() throws IOException { metricManager.closeReporters(getRegistryName(), tag); + if (getLeaderRegistryName() != null) { + metricManager.closeReporters(getLeaderRegistryName(), tag); + } } public SolrCore getCore() { @@ -176,9 +191,9 @@ public String getTag() { public static String createRegistryName(boolean cloud, String collectionName, String shardName, String replicaName, String coreName) { if (cloud) { // build registry name from logical names - return SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, collectionName, shardName, replicaName); + return SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, collectionName, shardName, replicaName); } else { - return SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, coreName); + return SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, coreName); } } @@ -224,7 +239,7 @@ public static String parseReplicaName(String collectionName, String coreName) { public static String createLeaderRegistryName(boolean cloud, String collectionName, String shardName) { if (cloud) { - return SolrMetricManager.getRegistryName(SolrInfoMBean.Group.collection, collectionName, shardName, "leader"); + return SolrMetricManager.getRegistryName(SolrInfoBean.Group.collection, collectionName, shardName, "leader"); } else { return null; } diff --git a/solr/core/src/java/org/apache/solr/metrics/SolrMetricInfo.java b/solr/core/src/java/org/apache/solr/metrics/SolrMetricInfo.java index 4d093ebb43c0..8edfa0428094 100644 --- a/solr/core/src/java/org/apache/solr/metrics/SolrMetricInfo.java +++ b/solr/core/src/java/org/apache/solr/metrics/SolrMetricInfo.java @@ -17,7 +17,7 @@ package org.apache.solr.metrics; import com.codahale.metrics.MetricRegistry; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; /** * Wraps meta-data for a metric. @@ -25,7 +25,7 @@ public final class SolrMetricInfo { public final String name; public final String scope; - public final SolrInfoMBean.Category category; + public final SolrInfoBean.Category category; /** * Creates a new instance of {@link SolrMetricInfo}. @@ -34,7 +34,7 @@ public final class SolrMetricInfo { * @param scope the scope of the metric (e.g. `/admin/ping`) * @param name the name of the metric (e.g. 
`Requests`) */ - public SolrMetricInfo(SolrInfoMBean.Category category, String scope, String name) { + public SolrMetricInfo(SolrInfoBean.Category category, String scope, String name) { this.name = name; this.scope = scope; this.category = category; @@ -45,18 +45,25 @@ public static SolrMetricInfo of(String fullName) { return null; } String[] names = fullName.split("\\."); - if (names.length < 3) { // not a valid info + if (names.length < 2) { // not a valid info return null; } // check top-level name for valid category - SolrInfoMBean.Category category; + SolrInfoBean.Category category; try { - category = SolrInfoMBean.Category.valueOf(names[0]); + category = SolrInfoBean.Category.valueOf(names[0]); } catch (IllegalArgumentException e) { // not a valid category return null; } - String scope = names[1]; - String name = fullName.substring(names[0].length() + names[1].length() + 2); + String scope; + String name; + if (names.length == 2) { + scope = null; + name = fullName.substring(names[0].length() + 1); + } else { + scope = names[1]; + name = fullName.substring(names[0].length() + names[1].length() + 2); + } return new SolrMetricInfo(category, scope, name); } diff --git a/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java b/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java index f4abee0cc92b..d4eb06ae7ded 100644 --- a/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java +++ b/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java @@ -51,7 +51,7 @@ import org.apache.solr.core.CoreContainer; import org.apache.solr.core.PluginInfo; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.metrics.reporters.solr.SolrClusterReporter; import org.apache.solr.metrics.reporters.solr.SolrShardReporter; @@ -69,11 +69,11 @@ * {@link MetricRegistry} instances are automatically created when first referenced by name. Similarly, * instances of {@link Metric} implementations, such as {@link Meter}, {@link Counter}, {@link Timer} and * {@link Histogram} are automatically created and registered under hierarchical names, in a specified - * registry, when {@link #meter(String, String, String...)} and other similar methods are called. + * registry, when {@link #meter(SolrInfoBean, String, String, String...)} and other similar methods are called. *

This class enforces a common prefix ({@link #REGISTRY_NAME_PREFIX}) in all registry * names.
*
    Solr uses several different registries for collecting metrics belonging to different groups, using - * {@link org.apache.solr.core.SolrInfoMBean.Group} as the main name of the registry (plus the + * {@link org.apache.solr.core.SolrInfoBean.Group} as the main name of the registry (plus the * above-mentioned prefix). Instances of {@link SolrMetricManager} are created for each {@link org.apache.solr.core.CoreContainer}, * and most registries are local to each instance, with the exception of two global registries: * solr.jetty and solr.jvm, which are shared between all {@link org.apache.solr.core.CoreContainer}-s
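To see how the pieces touched by this patch fit together, here is a minimal usage sketch; it is not part of the patch itself. It assumes a SolrMetricManager and an owning SolrInfoBean are already available, and the registry, scope and metric names ("collection1", "QUERY", "/select", "requests", "errors", "stats") are made up for illustration. It exercises the getRegistryName, counter, meter and registerGauge overloads introduced in this file, plus the new MetricsMap gauge.

import com.codahale.metrics.Counter;
import com.codahale.metrics.Meter;
import org.apache.solr.core.SolrInfoBean;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;

/** Illustrative sketch only; not part of this patch. */
public class MetricsUsageSketch {

  public static void register(SolrMetricManager metricManager, SolrInfoBean infoBean) {
    // Registry names are prefixed and grouped, e.g. "solr.core.collection1".
    String registry = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, "collection1");

    // The new overloads take the owning SolrInfoBean so that the names of the
    // created metrics are recorded on it via registerMetricName().
    Counter requests = metricManager.counter(infoBean, registry, "requests", "QUERY", "/select");
    Meter errors = metricManager.meter(infoBean, registry, "errors", "QUERY", "/select");

    // MetricsMap is a gauge whose value map is computed on demand; SolrJmxReporter
    // later exposes such gauges as DynamicMBeans rather than plain Gauges.
    MetricsMap details = new MetricsMap((detailed, map) -> {
      map.put("requests", requests.getCount());
      if (detailed) {
        map.put("errors", errors.getCount());
      }
    });
    metricManager.registerGauge(infoBean, registry, details, true, "stats", "QUERY", "/select");
  }
}

The SolrInfoBean parameter carries part of what the old SolrInfoMBean plumbing used to do: each bean records the metric names registered on its behalf via registerMetricName, so no MBean server is required to locate them.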

    @@ -87,11 +87,11 @@ public class SolrMetricManager { /** Registry name for Jetty-specific metrics. This name is also subject to overrides controlled by * system properties. This registry is shared between instances of {@link SolrMetricManager}. */ - public static final String JETTY_REGISTRY = REGISTRY_NAME_PREFIX + SolrInfoMBean.Group.jetty.toString(); + public static final String JETTY_REGISTRY = REGISTRY_NAME_PREFIX + SolrInfoBean.Group.jetty.toString(); /** Registry name for JVM-specific metrics. This name is also subject to overrides controlled by * system properties. This registry is shared between instances of {@link SolrMetricManager}. */ - public static final String JVM_REGISTRY = REGISTRY_NAME_PREFIX + SolrInfoMBean.Group.jvm.toString(); + public static final String JVM_REGISTRY = REGISTRY_NAME_PREFIX + SolrInfoBean.Group.jvm.toString(); private final ConcurrentMap registries = new ConcurrentHashMap<>(); @@ -247,6 +247,66 @@ public String toString() { } } + public static class OrFilter implements MetricFilter { + List filters = new ArrayList<>(); + + public OrFilter(Collection filters) { + if (filters != null) { + this.filters.addAll(filters); + } + } + + public OrFilter(MetricFilter... filters) { + if (filters != null) { + for (MetricFilter filter : filters) { + if (filter != null) { + this.filters.add(filter); + } + } + } + } + + @Override + public boolean matches(String s, Metric metric) { + for (MetricFilter filter : filters) { + if (filter.matches(s, metric)) { + return true; + } + } + return false; + } + } + + public static class AndFilter implements MetricFilter { + List filters = new ArrayList<>(); + + public AndFilter(Collection filters) { + if (filters != null) { + this.filters.addAll(filters); + } + } + + public AndFilter(MetricFilter... filters) { + if (filters != null) { + for (MetricFilter filter : filters) { + if (filter != null) { + this.filters.add(filter); + } + } + } + } + + @Override + public boolean matches(String s, Metric metric) { + for (MetricFilter filter : filters) { + if (!filter.matches(s, metric)) { + return false; + } + } + return true; + } + } + /** * Return a set of existing registry names. */ @@ -451,6 +511,21 @@ public Set clearMetrics(String registry, String... metricPath) { return filter.getMatched(); } + /** + * Retrieve matching metrics and their names. + * @param registry registry name. + * @param metricFilter filter (null is equivalent to {@link MetricFilter#ALL}). + * @return map of matching names and metrics + */ + public Map getMetrics(String registry, MetricFilter metricFilter) { + if (metricFilter == null || metricFilter == MetricFilter.ALL) { + return registry(registry).getMetrics(); + } + return registry(registry).getMetrics().entrySet().stream() + .filter(entry -> metricFilter.matches(entry.getKey(), entry.getValue())) + .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue())); + } + /** * Create or get an existing named {@link Meter} * @param registry registry name @@ -459,8 +534,12 @@ public Set clearMetrics(String registry, String... metricPath) { * @param metricPath (optional) additional top-most metric name path elements * @return existing or a newly created {@link Meter} */ - public Meter meter(String registry, String metricName, String... metricPath) { - return registry(registry).meter(mkName(metricName, metricPath)); + public Meter meter(SolrInfoBean info, String registry, String metricName, String... 
metricPath) { + final String name = mkName(metricName, metricPath); + if (info != null) { + info.registerMetricName(name); + } + return registry(registry).meter(name); } /** @@ -471,8 +550,12 @@ public Meter meter(String registry, String metricName, String... metricPath) { * @param metricPath (optional) additional top-most metric name path elements * @return existing or a newly created {@link Timer} */ - public Timer timer(String registry, String metricName, String... metricPath) { - return registry(registry).timer(mkName(metricName, metricPath)); + public Timer timer(SolrInfoBean info, String registry, String metricName, String... metricPath) { + final String name = mkName(metricName, metricPath); + if (info != null) { + info.registerMetricName(name); + } + return registry(registry).timer(name); } /** @@ -483,8 +566,12 @@ public Timer timer(String registry, String metricName, String... metricPath) { * @param metricPath (optional) additional top-most metric name path elements * @return existing or a newly created {@link Counter} */ - public Counter counter(String registry, String metricName, String... metricPath) { - return registry(registry).counter(mkName(metricName, metricPath)); + public Counter counter(SolrInfoBean info, String registry, String metricName, String... metricPath) { + final String name = mkName(metricName, metricPath); + if (info != null) { + info.registerMetricName(name); + } + return registry(registry).counter(name); } /** @@ -495,8 +582,12 @@ public Counter counter(String registry, String metricName, String... metricPath) * @param metricPath (optional) additional top-most metric name path elements * @return existing or a newly created {@link Histogram} */ - public Histogram histogram(String registry, String metricName, String... metricPath) { - return registry(registry).histogram(mkName(metricName, metricPath)); + public Histogram histogram(SolrInfoBean info, String registry, String metricName, String... metricPath) { + final String name = mkName(metricName, metricPath); + if (info != null) { + info.registerMetricName(name); + } + return registry(registry).histogram(name); } /** @@ -510,9 +601,12 @@ public Histogram histogram(String registry, String metricName, String... metricP * using dotted notation * @param metricPath (optional) additional top-most metric name path elements */ - public void register(String registry, Metric metric, boolean force, String metricName, String... metricPath) { + public void register(SolrInfoBean info, String registry, Metric metric, boolean force, String metricName, String... metricPath) { MetricRegistry metricRegistry = registry(registry); String fullName = mkName(metricName, metricPath); + if (info != null) { + info.registerMetricName(fullName); + } synchronized (metricRegistry) { if (force && metricRegistry.getMetrics().containsKey(fullName)) { metricRegistry.remove(fullName); @@ -521,8 +615,8 @@ public void register(String registry, Metric metric, boolean force, String metri } } - public void registerGauge(String registry, Gauge gauge, boolean force, String metricName, String... metricPath) { - register(registry, gauge, force, metricName, metricPath); + public void registerGauge(SolrInfoBean info, String registry, Gauge gauge, boolean force, String metricName, String... metricPath) { + register(info, registry, gauge, force, metricName, metricPath); } /** @@ -569,7 +663,7 @@ public static String mkName(String name, String... 
path) { * * NOTE: Once a registry is renamed in a way that its metrics are combined with another repository * it is no longer possible to retrieve the original metrics until this renaming is removed and the Solr - * {@link org.apache.solr.core.SolrInfoMBean.Group} of components that reported to that name is restarted. + * {@link org.apache.solr.core.SolrInfoBean.Group} of components that reported to that name is restarted. * @param registry The name of the registry * @return A potentially overridden (via System properties) registry name */ @@ -600,7 +694,7 @@ public static String enforcePrefix(String name) { * and the group parameter will be ignored. * @return fully-qualified and prefixed registry name, with overrides applied. */ - public static String getRegistryName(SolrInfoMBean.Group group, String... names) { + public static String getRegistryName(SolrInfoBean.Group group, String... names) { String fullName; String prefix = REGISTRY_NAME_PREFIX + group.toString() + "."; // check for existing prefix and group @@ -622,7 +716,7 @@ public static String getRegistryName(SolrInfoMBean.Group group, String... names) // reporter management /** - * Create and register {@link SolrMetricReporter}-s specific to a {@link org.apache.solr.core.SolrInfoMBean.Group}. + * Create and register {@link SolrMetricReporter}-s specific to a {@link org.apache.solr.core.SolrInfoBean.Group}. * Note: reporters that specify neither "group" nor "registry" attributes are treated as universal - * they will always be loaded for any group. These two attributes may also contain multiple comma- or * whitespace-separated values, in which case the reporter will be loaded for any matching value from @@ -634,7 +728,7 @@ public static String getRegistryName(SolrInfoMBean.Group group, String... names) * @param group selected group, not null * @param registryNames optional child registry name elements */ - public void loadReporters(PluginInfo[] pluginInfos, SolrResourceLoader loader, String tag, SolrInfoMBean.Group group, String... registryNames) { + public void loadReporters(PluginInfo[] pluginInfos, SolrResourceLoader loader, String tag, SolrInfoBean.Group group, String... 
registryNames) { if (pluginInfos == null || pluginInfos.length == 0) { return; } @@ -941,13 +1035,13 @@ public void loadShardReporters(PluginInfo[] pluginInfos, SolrCore core) { // prepare default plugin if none present in the config Map attrs = new HashMap<>(); attrs.put("name", "shardDefault"); - attrs.put("group", SolrInfoMBean.Group.shard.toString()); + attrs.put("group", SolrInfoBean.Group.shard.toString()); Map initArgs = new HashMap<>(); initArgs.put("period", DEFAULT_CLOUD_REPORTER_PERIOD); String registryName = core.getCoreMetricManager().getRegistryName(); // collect infos and normalize - List infos = prepareCloudPlugins(pluginInfos, SolrInfoMBean.Group.shard.toString(), SolrShardReporter.class.getName(), + List infos = prepareCloudPlugins(pluginInfos, SolrInfoBean.Group.shard.toString(), SolrShardReporter.class.getName(), attrs, initArgs, null); for (PluginInfo info : infos) { try { @@ -967,12 +1061,12 @@ public void loadClusterReporters(PluginInfo[] pluginInfos, CoreContainer cc) { } Map attrs = new HashMap<>(); attrs.put("name", "clusterDefault"); - attrs.put("group", SolrInfoMBean.Group.cluster.toString()); + attrs.put("group", SolrInfoBean.Group.cluster.toString()); Map initArgs = new HashMap<>(); initArgs.put("period", DEFAULT_CLOUD_REPORTER_PERIOD); - List infos = prepareCloudPlugins(pluginInfos, SolrInfoMBean.Group.cluster.toString(), SolrClusterReporter.class.getName(), + List infos = prepareCloudPlugins(pluginInfos, SolrInfoBean.Group.cluster.toString(), SolrClusterReporter.class.getName(), attrs, initArgs, null); - String registryName = getRegistryName(SolrInfoMBean.Group.cluster); + String registryName = getRegistryName(SolrInfoBean.Group.cluster); for (PluginInfo info : infos) { try { SolrMetricReporter reporter = loadReporter(registryName, cc.getResourceLoader(), info, null); diff --git a/solr/core/src/java/org/apache/solr/metrics/SolrMetricReporter.java b/solr/core/src/java/org/apache/solr/metrics/SolrMetricReporter.java index ff2d3fcbdc65..9ad15d0168db 100644 --- a/solr/core/src/java/org/apache/solr/metrics/SolrMetricReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/SolrMetricReporter.java @@ -30,6 +30,7 @@ public abstract class SolrMetricReporter implements Closeable, PluginInfoInitial protected final String registryName; protected final SolrMetricManager metricManager; protected PluginInfo pluginInfo; + protected boolean enabled = true; /** * Create a reporter for metrics managed in a named registry. @@ -57,6 +58,17 @@ public void init(PluginInfo pluginInfo) { validate(); } + /** + * Enable reporting, defaults to true. Implementations should check this flag in + * {@link #validate()} and accordingly enable or disable reporting. + * @param enabled enable, defaults to true when null or not set. + */ + public void setEnabled(Boolean enabled) { + if (enabled != null) { + this.enabled = enabled; + } + } + /** * Get the effective {@link PluginInfo} instance that was used for * initialization of this plugin. diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java b/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java index 1f5b4f015139..4298c1842da9 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java @@ -50,6 +50,20 @@ public JmxObjectNameFactory(String reporterName, String domain, String... additi this.props = additionalProperties; } + /** + * Return current domain. 
+ */ + public String getDomain() { + return domain; + } + + /** + * Return current reporterName. + */ + public String getReporterName() { + return reporterName; + } + /** * Create a hierarchical name. * @@ -60,7 +74,8 @@ public JmxObjectNameFactory(String reporterName, String domain, String... additi @Override public ObjectName createName(String type, String currentDomain, String name) { SolrMetricInfo metricInfo = SolrMetricInfo.of(name); - + String safeName = metricInfo != null ? metricInfo.name : name; + safeName = safeName.replaceAll(":", "_"); // It turns out that ObjectName(String) mostly preserves key ordering // as specified in the constructor (except for the 'type' key that ends // up at top level) - unlike ObjectName(String, Map) constructor @@ -90,24 +105,42 @@ public ObjectName createName(String type, String currentDomain, String name) { sb.append(currentDomain); sb.append(':'); } - sb.append("reporter="); - sb.append(reporterName); - sb.append(','); + if (props != null && props.length > 0) { + boolean added = false; + for (int i = 0; i < props.length; i += 2) { + if (props[i] == null || props[i].isEmpty()) { + continue; + } + if (props[i + 1] == null || props[i + 1].isEmpty()) { + continue; + } + sb.append(','); + sb.append(props[i]); + sb.append('='); + sb.append(props[i + 1]); + added = true; + } + if (added) { + sb.append(','); + } + } if (metricInfo != null) { sb.append("category="); sb.append(metricInfo.category.toString()); - sb.append(",scope="); - sb.append(metricInfo.scope); + if (metricInfo.scope != null) { + sb.append(",scope="); + sb.append(metricInfo.scope); + } // we could also split by type, but don't call it 'type' :) // if (type != null) { // sb.append(",class="); // sb.append(type); // } sb.append(",name="); - sb.append(metricInfo.name); + sb.append(safeName); } else { // make dotted names into hierarchies - String[] path = name.split("\\."); + String[] path = safeName.split("\\."); for (int i = 0; i < path.length - 1; i++) { if (i > 0) { sb.append(','); @@ -127,20 +160,6 @@ public ObjectName createName(String type, String currentDomain, String name) { sb.append("name="); sb.append(path[path.length - 1]); } - if (props != null && props.length > 0) { - for (int i = 0; i < props.length; i += 2) { - if (props[i] == null || props[i].isEmpty()) { - continue; - } - if (props[i + 1] == null || props[i + 1].isEmpty()) { - continue; - } - sb.append(','); - sb.append(props[i]); - sb.append('='); - sb.append(props[i + 1]); - } - } ObjectName objectName; diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/ReporterClientCache.java b/solr/core/src/java/org/apache/solr/metrics/reporters/ReporterClientCache.java new file mode 100644 index 000000000000..5745dec17382 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/ReporterClientCache.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.metrics.reporters; + +import java.io.Closeable; +import java.lang.invoke.MethodHandles; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Simple cache for reusable service clients used by some implementations of + * {@link org.apache.solr.metrics.SolrMetricReporter}. + */ +public class ReporterClientCache implements Closeable { + private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + private final Map cache = new ConcurrentHashMap<>(); + + /** + * Provide an instance of service client. + * @param formal type + */ + public interface ClientProvider { + /** + * Get an instance of a service client. It's not specified that each time this + * method is invoked a new client instance should be returned. + * @return client instance + * @throws Exception when client creation encountered an error. + */ + T get() throws Exception; + } + + /** + * Get existing or register a new client. + * @param id client id + * @param clientProvider provider of new client instances + */ + public synchronized T getOrCreate(String id, ClientProvider clientProvider) { + T item = cache.get(id); + if (item == null) { + try { + item = clientProvider.get(); + cache.put(id, item); + } catch (Exception e) { + LOG.warn("Error providing a new client for id=" + id, e); + item = null; + } + } + return item; + } + + /** + * Empty this cache, and close all clients that are {@link Closeable}. 
+ */ + public void close() { + for (T client : cache.values()) { + if (client instanceof Closeable) { + try { + ((Closeable)client).close(); + } catch (Exception e) { + LOG.warn("Error closing client " + client + ", ignoring...", e); + } + } + } + cache.clear(); + } +} diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGangliaReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGangliaReporter.java index 45561e58b58a..142ddd884e6d 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGangliaReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGangliaReporter.java @@ -17,6 +17,9 @@ package org.apache.solr.metrics.reporters; import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import com.codahale.metrics.MetricFilter; @@ -24,21 +27,26 @@ import info.ganglia.gmetric4j.gmetric.GMetric; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricReporter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * */ public class SolrGangliaReporter extends SolrMetricReporter { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private String host = null; private int port = -1; private boolean multicast; private int period = 60; private String instancePrefix = null; - private String filterPrefix = null; + private List filters = new ArrayList<>(); private boolean testing; private GangliaReporter reporter; + private static final ReporterClientCache serviceRegistry = new ReporterClientCache<>(); + // for unit tests GMetric ganglia = null; @@ -65,10 +73,24 @@ public void setPrefix(String prefix) { this.instancePrefix = prefix; } - public void setFilter(String filter) { - this.filterPrefix = filter; + /** + * Report only metrics with names matching any of the prefix filters. + * @param filters list of 0 or more prefixes. If the list is empty then + * all names will match. + */ + public void setFilter(List filters) { + if (filters == null || filters.isEmpty()) { + return; + } + this.filters.addAll(filters); } + // due to vagaries of SolrPluginUtils.invokeSetters we need this too + public void setFilter(String filter) { + if (filter != null && !filter.isEmpty()) { + this.filters.add(filter); + } + } public void setPeriod(int period) { this.period = period; @@ -89,6 +111,10 @@ void setGMetric(GMetric ganglia) { @Override protected void validate() throws IllegalStateException { + if (!enabled) { + log.info("Reporter disabled for registry " + registryName); + return; + } if (host == null) { throw new IllegalStateException("Init argument 'host' must be set to a valid Ganglia server name."); } @@ -106,12 +132,12 @@ protected void validate() throws IllegalStateException { //this is a separate method for unit tests void start() { if (!testing) { - try { - ganglia = new GMetric(host, port, - multicast ? GMetric.UDPAddressingMode.MULTICAST : GMetric.UDPAddressingMode.UNICAST, - 1); - } catch (IOException ioe) { - throw new IllegalStateException("Exception connecting to Ganglia", ioe); + String id = host + ":" + port + ":" + multicast; + ganglia = serviceRegistry.getOrCreate(id, () -> new GMetric(host, port, + multicast ? 
GMetric.UDPAddressingMode.MULTICAST : GMetric.UDPAddressingMode.UNICAST, + 1)); + if (ganglia == null) { + return; } } if (instancePrefix == null) { @@ -125,8 +151,8 @@ void start() { .convertDurationsTo(TimeUnit.MILLISECONDS) .prefixedWith(instancePrefix); MetricFilter filter; - if (filterPrefix != null) { - filter = new SolrMetricManager.PrefixFilter(filterPrefix); + if (!filters.isEmpty()) { + filter = new SolrMetricManager.PrefixFilter(filters); } else { filter = MetricFilter.ALL; } diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGraphiteReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGraphiteReporter.java index 8565ce86c058..d5b7a203ab8a 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGraphiteReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrGraphiteReporter.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import com.codahale.metrics.MetricFilter; @@ -41,9 +43,11 @@ public class SolrGraphiteReporter extends SolrMetricReporter { private int period = 60; private boolean pickled = false; private String instancePrefix = null; - private String filterPrefix = null; + private List filters = new ArrayList<>(); private GraphiteReporter reporter = null; + private static final ReporterClientCache serviceRegistry = new ReporterClientCache<>(); + /** * Create a Graphite reporter for metrics managed in a named registry. * @@ -67,10 +71,25 @@ public void setPrefix(String prefix) { this.instancePrefix = prefix; } + /** + * Report only metrics with names matching any of the prefix filters. + * @param filters list of 0 or more prefixes. If the list is empty then + * all names will match. 
+ */ + public void setFilter(List filters) { + if (filters == null || filters.isEmpty()) { + return; + } + this.filters.addAll(filters); + } + public void setFilter(String filter) { - this.filterPrefix = filter; + if (filter != null && !filter.isEmpty()) { + this.filters.add(filter); + } } + public void setPickled(boolean pickled) { this.pickled = pickled; } @@ -81,6 +100,10 @@ public void setPeriod(int period) { @Override protected void validate() throws IllegalStateException { + if (!enabled) { + log.info("Reporter disabled for registry " + registryName); + return; + } if (host == null) { throw new IllegalStateException("Init argument 'host' must be set to a valid Graphite server name."); } @@ -93,12 +116,15 @@ protected void validate() throws IllegalStateException { if (period < 1) { throw new IllegalStateException("Init argument 'period' is in time unit 'seconds' and must be at least 1."); } - final GraphiteSender graphite; - if (pickled) { - graphite = new PickledGraphite(host, port); - } else { - graphite = new Graphite(host, port); - } + GraphiteSender graphite; + String id = host + ":" + port + ":" + pickled; + graphite = serviceRegistry.getOrCreate(id, () -> { + if (pickled) { + return new PickledGraphite(host, port); + } else { + return new Graphite(host, port); + } + }); if (instancePrefix == null) { instancePrefix = registryName; } else { @@ -110,8 +136,8 @@ protected void validate() throws IllegalStateException { .convertRatesTo(TimeUnit.SECONDS) .convertDurationsTo(TimeUnit.MILLISECONDS); MetricFilter filter; - if (filterPrefix != null) { - filter = new SolrMetricManager.PrefixFilter(filterPrefix); + if (!filters.isEmpty()) { + filter = new SolrMetricManager.PrefixFilter(filters); } else { filter = MetricFilter.ALL; } diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java index 0e78eee038a3..d09e04372144 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java @@ -16,15 +16,25 @@ */ package org.apache.solr.metrics.reporters; +import javax.management.InstanceNotFoundException; import javax.management.MBeanServer; +import javax.management.ObjectInstance; +import javax.management.ObjectName; -import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; import java.util.Locale; +import java.util.Set; +import com.codahale.metrics.Gauge; import com.codahale.metrics.JmxReporter; +import com.codahale.metrics.MetricFilter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.MetricRegistryListener; import org.apache.solr.core.PluginInfo; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricReporter; import org.apache.solr.util.JmxUtil; @@ -34,17 +44,25 @@ /** * A {@link SolrMetricReporter} that finds (or creates) a MBeanServer from * the given configuration and registers metrics to it with JMX. + *

NOTE: {@link JmxReporter} that this class uses exports only newly added metrics (it doesn't + * process already existing metrics in a registry)
    */ public class SolrJmxReporter extends SolrMetricReporter { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + private static final ReporterClientCache serviceRegistry = new ReporterClientCache<>(); + private String domain; private String agentId; private String serviceUrl; + private String rootName; + private List filters = new ArrayList<>(); private JmxReporter reporter; + private MetricRegistry registry; private MBeanServer mBeanServer; + private MetricsMapListener listener; /** * Creates a new instance of {@link SolrJmxReporter}. @@ -57,7 +75,7 @@ public SolrJmxReporter(SolrMetricManager metricManager, String registryName) { } /** - * Initializes the reporter by finding (or creating) a MBeanServer + * Initializes the reporter by finding an MBeanServer * and registering the metricManager's metric registry. * * @param pluginInfo the configuration for the reporter @@ -65,44 +83,62 @@ public SolrJmxReporter(SolrMetricManager metricManager, String registryName) { @Override public synchronized void init(PluginInfo pluginInfo) { super.init(pluginInfo); - + if (!enabled) { + log.info("Reporter disabled for registry " + registryName); + return; + } + log.debug("Initializing for registry " + registryName); if (serviceUrl != null && agentId != null) { - ManagementFactory.getPlatformMBeanServer(); // Ensure at least one MBeanServer is available. mBeanServer = JmxUtil.findFirstMBeanServer(); - log.warn("No more than one of serviceUrl(%s) and agentId(%s) should be configured, using first MBeanServer instead of configuration.", + log.warn("No more than one of serviceUrl({}) and agentId({}) should be configured, using first MBeanServer instead of configuration.", serviceUrl, agentId, mBeanServer); - } - else if (serviceUrl != null) { - try { - mBeanServer = JmxUtil.findMBeanServerForServiceUrl(serviceUrl); - } catch (IOException e) { - log.warn("findMBeanServerForServiceUrl(%s) exception: %s", serviceUrl, e); - mBeanServer = null; - } - } - else if (agentId != null) { + } else if (serviceUrl != null) { + // reuse existing services + mBeanServer = serviceRegistry.getOrCreate(serviceUrl, () -> JmxUtil.findMBeanServerForServiceUrl(serviceUrl)); + } else if (agentId != null) { mBeanServer = JmxUtil.findMBeanServerForAgentId(agentId); } else { - ManagementFactory.getPlatformMBeanServer(); // Ensure at least one MBeanServer is available. mBeanServer = JmxUtil.findFirstMBeanServer(); - log.warn("No serviceUrl or agentId was configured, using first MBeanServer.", mBeanServer); + log.debug("No serviceUrl or agentId was configured, using first MBeanServer: " + mBeanServer); } if (mBeanServer == null) { - log.warn("No JMX server found. Not exposing Solr metrics."); + log.warn("No JMX server found. Not exposing Solr metrics via JMX."); return; } - JmxObjectNameFactory jmxObjectNameFactory = new JmxObjectNameFactory(pluginInfo.name, domain); + if (domain == null || domain.isEmpty()) { + domain = registryName; + } + String fullDomain = domain; + if (rootName != null && !rootName.isEmpty()) { + fullDomain = rootName + "." 
+ domain; + } + JmxObjectNameFactory jmxObjectNameFactory = new JmxObjectNameFactory(pluginInfo.name, fullDomain); + registry = metricManager.registry(registryName); + // filter out MetricsMap gauges - we have a better way of handling them + MetricFilter mmFilter = (name, metric) -> !(metric instanceof MetricsMap); + MetricFilter filter; + if (filters.isEmpty()) { + filter = mmFilter; + } else { + // apply also prefix filters + SolrMetricManager.PrefixFilter prefixFilter = new SolrMetricManager.PrefixFilter(filters); + filter = new SolrMetricManager.AndFilter(prefixFilter, mmFilter); + } - reporter = JmxReporter.forRegistry(metricManager.registry(registryName)) + reporter = JmxReporter.forRegistry(registry) .registerWith(mBeanServer) - .inDomain(domain) + .inDomain(fullDomain) + .filter(filter) .createsObjectNamesWith(jmxObjectNameFactory) .build(); reporter.start(); + // workaround for inability to register custom MBeans (to be available in metrics 4.0?) + listener = new MetricsMapListener(mBeanServer, jmxObjectNameFactory); + registry.addListener(listener); - log.info("JMX monitoring enabled at server: " + mBeanServer); + log.info("JMX monitoring for '" + fullDomain + "' (registry '" + registryName + "') enabled at server: " + mBeanServer); } /** @@ -114,6 +150,11 @@ public synchronized void close() { reporter.close(); reporter = null; } + if (listener != null && registry != null) { + registry.removeListener(listener); + listener.close(); + listener = null; + } } /** @@ -127,9 +168,19 @@ protected void validate() throws IllegalStateException { // Nothing to validate } + + /** + * Set root name of the JMX hierarchy for this reporter. Default (null or empty) is none, ie. + * the hierarchy will start from the domain name. + * @param rootName root name of the JMX name hierarchy, or null or empty for default. + */ + public void setRootName(String rootName) { + this.rootName = rootName; + } + /** * Sets the domain with which MBeans are published. If none is set, - * the domain defaults to the name of the core. + * the domain defaults to the name of the registry. * * @param domain the domain */ @@ -162,7 +213,46 @@ public void setAgentId(String agentId) { } /** - * Retrieves the reporter's MBeanServer. + * Return configured agentId or null. + */ + public String getAgentId() { + return agentId; + } + + /** + * Return configured serviceUrl or null. + */ + public String getServiceUrl() { + return serviceUrl; + } + + /** + * Return configured domain or null. + */ + public String getDomain() { + return domain; + } + + /** + * Report only metrics with names matching any of the prefix filters. + * @param filters list of 0 or more prefixes. If the list is empty then + * all names will match. + */ + public void setFilter(List filters) { + if (filters == null || filters.isEmpty()) { + return; + } + this.filters.addAll(filters); + } + + public void setFilter(String filter) { + if (filter != null && !filter.isEmpty()) { + this.filters.add(filter); + } + } + + /** + * Return the reporter's MBeanServer. * * @return the reporter's MBeanServer */ @@ -170,10 +260,72 @@ public MBeanServer getMBeanServer() { return mBeanServer; } + /** + * For unit tests. + * @return true if this reporter is actively reporting metrics to JMX. 
+ */ + public boolean isActive() { + return reporter != null; + } + @Override public String toString() { - return String.format(Locale.ENGLISH, "[%s@%s: domain = %s, service url = %s, agent id = %s]", - getClass().getName(), Integer.toHexString(hashCode()), domain, serviceUrl, agentId); + return String.format(Locale.ENGLISH, "[%s@%s: rootName = %s, domain = %s, service url = %s, agent id = %s]", + getClass().getName(), Integer.toHexString(hashCode()), rootName, domain, serviceUrl, agentId); } + private static class MetricsMapListener extends MetricRegistryListener.Base { + MBeanServer server; + JmxObjectNameFactory nameFactory; + // keep the names so that we can unregister them on core close + Set registered = new HashSet<>(); + + MetricsMapListener(MBeanServer server, JmxObjectNameFactory nameFactory) { + this.server = server; + this.nameFactory = nameFactory; + } + + @Override + public void onGaugeAdded(String name, Gauge gauge) { + if (!(gauge instanceof MetricsMap)) { + return; + } + synchronized (server) { + try { + ObjectName objectName = nameFactory.createName("gauges", nameFactory.getDomain(), name); + log.debug("REGISTER " + objectName); + if (registered.contains(objectName) || server.isRegistered(objectName)) { + log.debug("-unregistering old instance of " + objectName); + try { + server.unregisterMBean(objectName); + } catch (InstanceNotFoundException e) { + // ignore + } + } + // some MBean servers re-write object name to include additional properties + ObjectInstance instance = server.registerMBean(gauge, objectName); + if (instance != null) { + registered.add(instance.getObjectName()); + } + } catch (Exception e) { + log.warn("bean registration error", e); + } + } + } + + public void close() { + synchronized (server) { + for (ObjectName name : registered) { + try { + if (server.isRegistered(name)) { + server.unregisterMBean(name); + } + } catch (Exception e) { + log.debug("bean unregistration error", e); + } + } + registered.clear(); + } + } + } } diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java index 817dda17f94b..8b7c35e88e42 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import com.codahale.metrics.MetricFilter; @@ -47,7 +49,7 @@ public class SolrSlf4jReporter extends SolrMetricReporter { private int period = 60; private String instancePrefix = null; private String logger = null; - private String filterPrefix = null; + private List filters = new ArrayList<>(); private Slf4jReporter reporter; /** @@ -65,10 +67,25 @@ public void setPrefix(String prefix) { this.instancePrefix = prefix; } + /** + * Report only metrics with names matching any of the prefix filters. + * @param filters list of 0 or more prefixes. If the list is empty then + * all names will match. 
+ */ + public void setFilter(List filters) { + if (filters == null || filters.isEmpty()) { + return; + } + this.filters.addAll(filters); + } + public void setFilter(String filter) { - this.filterPrefix = filter; + if (filter != null && !filter.isEmpty()) { + this.filters.add(filter); + } } + public void setLogger(String logger) { this.logger = logger; } @@ -79,6 +96,10 @@ public void setPeriod(int period) { @Override protected void validate() throws IllegalStateException { + if (!enabled) { + log.info("Reporter disabled for registry " + registryName); + return; + } if (period < 1) { throw new IllegalStateException("Init argument 'period' is in time unit 'seconds' and must be at least 1."); } @@ -93,8 +114,8 @@ protected void validate() throws IllegalStateException { .convertDurationsTo(TimeUnit.MILLISECONDS); MetricFilter filter; - if (filterPrefix != null) { - filter = new SolrMetricManager.PrefixFilter(filterPrefix); + if (!filters.isEmpty()) { + filter = new SolrMetricManager.PrefixFilter(filters); } else { filter = MetricFilter.ALL; } diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java index a34accd82aa0..c677bea66a21 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java @@ -33,7 +33,7 @@ import org.apache.solr.common.cloud.SolrZkClient; import org.apache.solr.common.cloud.ZkNodeProps; import org.apache.solr.core.CoreContainer; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.handler.admin.MetricsCollectorHandler; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricReporter; @@ -92,14 +92,14 @@ public class SolrClusterReporter extends SolrMetricReporter { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - public static final String CLUSTER_GROUP = SolrMetricManager.overridableRegistryName(SolrInfoMBean.Group.cluster.toString()); + public static final String CLUSTER_GROUP = SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.cluster.toString()); public static final List DEFAULT_REPORTS = new ArrayList() {{ add(new SolrReporter.Report(CLUSTER_GROUP, "jetty", - SolrMetricManager.overridableRegistryName(SolrInfoMBean.Group.jetty.toString()), + SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.jetty.toString()), Collections.emptySet())); // all metrics add(new SolrReporter.Report(CLUSTER_GROUP, "jvm", - SolrMetricManager.overridableRegistryName(SolrInfoMBean.Group.jvm.toString()), + SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.jvm.toString()), new HashSet() {{ add("memory\\.total\\..*"); add("memory\\.heap\\..*"); @@ -109,7 +109,7 @@ public class SolrClusterReporter extends SolrMetricReporter { add("os\\.OpenFileDescriptorCount"); add("threads\\.count"); }})); - add(new SolrReporter.Report(CLUSTER_GROUP, "node", SolrMetricManager.overridableRegistryName(SolrInfoMBean.Group.node.toString()), + add(new SolrReporter.Report(CLUSTER_GROUP, "node", SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.node.toString()), new HashSet() {{ add("CONTAINER\\.cores\\..*"); add("CONTAINER\\.fs\\..*"); @@ -159,6 +159,16 @@ public void setReport(List reportConfig) { }); } + public void setReport(Map map) { + if (map == null || map.isEmpty()) { + return; + } + 
SolrReporter.Report r = SolrReporter.Report.fromMap(map); + if (r != null) { + reports.add(r); + } + } + // for unit tests int getPeriod() { return period; @@ -170,9 +180,6 @@ List getReports() { @Override protected void validate() throws IllegalStateException { - if (period < 1) { - log.info("Turning off node reporter, period=" + period); - } if (reports.isEmpty()) { // set defaults reports = DEFAULT_REPORTS; } @@ -189,12 +196,17 @@ public void setCoreContainer(CoreContainer cc) { if (reporter != null) { reporter.close();; } + if (!enabled) { + log.info("Reporter disabled for registry " + registryName); + return; + } // start reporter only in cloud mode if (!cc.isZooKeeperAware()) { log.warn("Not ZK-aware, not starting..."); return; } if (period < 1) { // don't start it + log.info("Turning off node reporter, period=" + period); return; } HttpClient httpClient = cc.getUpdateShardHandler().getHttpClient(); @@ -205,6 +217,7 @@ public void setCoreContainer(CoreContainer cc) { .convertDurationsTo(TimeUnit.MILLISECONDS) .withHandler(handler) .withReporterId(reporterId) + .setCompact(true) .cloudClient(false) // we want to send reports specifically to a selected leader instance .skipAggregateValues(true) // we don't want to transport details of aggregates .skipHistograms(true) // we don't want to transport histograms diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java index 0bca68f52df0..8d36cefdc0f1 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrReporter.java @@ -126,6 +126,7 @@ public static class Builder { private boolean skipHistograms; private boolean skipAggregateValues; private boolean cloudClient; + private boolean compact; private SolrParams params; /** @@ -146,6 +147,7 @@ private Builder(SolrMetricManager metricManager, List reports) { this.skipHistograms = false; this.skipAggregateValues = false; this.cloudClient = false; + this.compact = true; this.params = null; } @@ -169,6 +171,16 @@ public Builder cloudClient(boolean cloudClient) { return this; } + /** + * If true then use "compact" data representation. + * @param compact compact representation. + * @return {@code this} + */ + public Builder setCompact(boolean compact) { + this.compact = compact; + return this; + } + /** * Histograms are difficult / impossible to aggregate, so it may not be * worth to report them. 
@@ -244,7 +256,7 @@ public Builder convertDurationsTo(TimeUnit durationUnit) { */ public SolrReporter build(HttpClient client, Supplier urlProvider) { return new SolrReporter(client, urlProvider, metricManager, reports, handler, reporterId, rateUnit, durationUnit, - params, skipHistograms, skipAggregateValues, cloudClient); + params, skipHistograms, skipAggregateValues, cloudClient, compact); } } @@ -258,6 +270,7 @@ public SolrReporter build(HttpClient client, Supplier urlProvider) { private boolean skipHistograms; private boolean skipAggregateValues; private boolean cloudClient; + private boolean compact; private ModifiableSolrParams params; private Map metadata; @@ -288,7 +301,8 @@ public String toString() { public SolrReporter(HttpClient httpClient, Supplier urlProvider, SolrMetricManager metricManager, List metrics, String handler, String reporterId, TimeUnit rateUnit, TimeUnit durationUnit, - SolrParams params, boolean skipHistograms, boolean skipAggregateValues, boolean cloudClient) { + SolrParams params, boolean skipHistograms, boolean skipAggregateValues, + boolean cloudClient, boolean compact) { super(null, "solr-reporter", MetricFilter.ALL, rateUnit, durationUnit); this.metricManager = metricManager; this.urlProvider = urlProvider; @@ -311,6 +325,7 @@ public SolrReporter(HttpClient httpClient, Supplier urlProvider, SolrMet this.skipHistograms = skipHistograms; this.skipAggregateValues = skipAggregateValues; this.cloudClient = cloudClient; + this.compact = compact; this.params = new ModifiableSolrParams(); this.params.set(REPORTER_ID, reporterId); // allow overrides to take precedence @@ -361,7 +376,7 @@ public void report() { } final String effectiveGroup = group; MetricUtils.toSolrInputDocuments(metricManager.registry(registryName), Collections.singletonList(report.filter), MetricFilter.ALL, - skipHistograms, skipAggregateValues, false, metadata, doc -> { + MetricUtils.PropertyFilter.ALL, skipHistograms, skipAggregateValues, compact, metadata, doc -> { doc.setField(REGISTRY_ID, registryName); doc.setField(GROUP_ID, effectiveGroup); if (effectiveLabel != null) { diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrShardReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrShardReporter.java index 8b36d3e0c96d..6ae84ac74bac 100644 --- a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrShardReporter.java +++ b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrShardReporter.java @@ -98,7 +98,13 @@ public void setFilter(List filterConfig) { if (filterConfig == null || filterConfig.isEmpty()) { return; } - filters = filterConfig; + filters.addAll(filterConfig); + } + + public void setFilter(String filter) { + if (filter != null && !filter.isEmpty()) { + this.filters.add(filter); + } } // for unit tests @@ -108,9 +114,6 @@ int getPeriod() { @Override protected void validate() throws IllegalStateException { - if (period < 1) { - log.info("Turning off shard reporter, period=" + period); - } if (filters.isEmpty()) { filters = DEFAULT_FILTERS; } @@ -128,13 +131,17 @@ public void setCore(SolrCore core) { if (reporter != null) { reporter.close(); } + if (!enabled) { + log.info("Reporter disabled for registry " + registryName); + return; + } if (core.getCoreDescriptor().getCloudDescriptor() == null) { // not a cloud core log.warn("Not initializing shard reporter for non-cloud core " + core.getName()); return; } if (period < 1) { // don't start it - log.warn("Not starting shard reporter "); + log.warn("period=" + period + 
", not starting shard reporter "); return; } // our id is coreNodeName @@ -151,10 +158,11 @@ public void setCore(SolrCore core) { .convertDurationsTo(TimeUnit.MILLISECONDS) .withHandler(handler) .withReporterId(id) + .setCompact(true) .cloudClient(false) // we want to send reports specifically to a selected leader instance .skipAggregateValues(true) // we don't want to transport details of aggregates .skipHistograms(true) // we don't want to transport histograms - .build(core.getCoreDescriptor().getCoreContainer().getUpdateShardHandler().getHttpClient(), new LeaderUrlSupplier(core)); + .build(core.getCoreContainer().getUpdateShardHandler().getHttpClient(), new LeaderUrlSupplier(core)); reporter.start(period, TimeUnit.SECONDS); } @@ -172,7 +180,7 @@ public String get() { if (cd == null) { return null; } - ClusterState state = core.getCoreDescriptor().getCoreContainer().getZkController().getClusterState(); + ClusterState state = core.getCoreContainer().getZkController().getClusterState(); DocCollection collection = state.getCollection(core.getCoreDescriptor().getCollectionName()); Replica replica = collection.getLeader(core.getCoreDescriptor().getCloudDescriptor().getShardId()); if (replica == null) { diff --git a/solr/core/src/java/org/apache/solr/parser/QueryParser.java b/solr/core/src/java/org/apache/solr/parser/QueryParser.java index 2b64b88b68d6..39ec673824ea 100644 --- a/solr/core/src/java/org/apache/solr/parser/QueryParser.java +++ b/solr/core/src/java/org/apache/solr/parser/QueryParser.java @@ -21,7 +21,7 @@ public class QueryParser extends SolrQueryParserBase implements QueryParserConst static public enum Operator { OR, AND } /** default split on whitespace behavior */ - public static final boolean DEFAULT_SPLIT_ON_WHITESPACE = true; + public static final boolean DEFAULT_SPLIT_ON_WHITESPACE = false; public QueryParser(String defaultField, QParser parser) { this(new FastCharStream(new StringReader(""))); diff --git a/solr/core/src/java/org/apache/solr/parser/QueryParser.jj b/solr/core/src/java/org/apache/solr/parser/QueryParser.jj index c07b28d477dd..1dfdfcb95748 100644 --- a/solr/core/src/java/org/apache/solr/parser/QueryParser.jj +++ b/solr/core/src/java/org/apache/solr/parser/QueryParser.jj @@ -45,7 +45,7 @@ public class QueryParser extends SolrQueryParserBase { static public enum Operator { OR, AND } /** default split on whitespace behavior */ - public static final boolean DEFAULT_SPLIT_ON_WHITESPACE = true; + public static final boolean DEFAULT_SPLIT_ON_WHITESPACE = false; public QueryParser(String defaultField, QParser parser) { this(new FastCharStream(new StringReader(""))); diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java index 0bfef4c7415f..406a4ed4c8c0 100644 --- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java +++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java @@ -52,8 +52,9 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.grouping.AllGroupHeadsCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupsCollector; -import org.apache.lucene.search.grouping.term.TermGroupFacetCollector; +import org.apache.lucene.search.grouping.AllGroupsCollector; +import org.apache.lucene.search.grouping.TermGroupFacetCollector; +import org.apache.lucene.search.grouping.TermGroupSelector; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; import 
org.apache.lucene.util.StringHelper; @@ -330,7 +331,7 @@ public int getGroupedFacetQueryCount(Query facetQuery, DocSet docSet) throws IOE ); } - TermAllGroupsCollector collector = new TermAllGroupsCollector(groupField); + AllGroupsCollector collector = new AllGroupsCollector<>(new TermGroupSelector(groupField)); Filter mainQueryFilter = docSet.getTopFilter(); // This returns a filter that only matches documents matching with q param and fq params Query filteredFacetQuery = new BooleanQuery.Builder() .add(facetQuery, Occur.MUST) diff --git a/solr/core/src/java/org/apache/solr/request/SolrRequestHandler.java b/solr/core/src/java/org/apache/solr/request/SolrRequestHandler.java index 82ce2e0fbeb3..8350f9ed1c07 100644 --- a/solr/core/src/java/org/apache/solr/request/SolrRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/request/SolrRequestHandler.java @@ -17,7 +17,7 @@ package org.apache.solr.request; import org.apache.solr.common.util.NamedList; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.response.SolrQueryResponse; /** @@ -38,7 +38,7 @@ * * */ -public interface SolrRequestHandler extends SolrInfoMBean { +public interface SolrRequestHandler extends SolrInfoBean { /** init will be called just once, immediately after creation. *

    The args are user-level initialization parameters that diff --git a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java index 7b27c3f46c8a..286d2c12c8b2 100644 --- a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java +++ b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java @@ -272,7 +272,8 @@ public Query getFieldQuery(QParser parser, SchemaField field, String externalVal */ public RawCurrencyValueSource getValueSource(SchemaField field, QParser parser) { - field.checkFieldCacheSource(); + getAmountField(field).checkFieldCacheSource(); + getCurrencyField(field).checkFieldCacheSource(); return new RawCurrencyValueSource(field, defaultCurrency, parser); } @@ -488,8 +489,8 @@ public RawCurrencyValueSource(SchemaField sfield, String targetCurrencyCode, QPa throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Currency code not supported by this JVM: " + targetCurrencyCode); } - SchemaField amountField = schema.getField(sf.getName() + POLY_FIELD_SEPARATOR + FIELD_SUFFIX_AMOUNT_RAW); - SchemaField currencyField = schema.getField(sf.getName() + POLY_FIELD_SEPARATOR + FIELD_SUFFIX_CURRENCY); + SchemaField amountField = getAmountField(sf); + SchemaField currencyField = getCurrencyField(sf); currencyValues = currencyField.getType().getValueSource(currencyField, parser); amountValues = amountField.getType().getValueSource(amountField, parser); diff --git a/solr/core/src/java/org/apache/solr/schema/DatePointField.java b/solr/core/src/java/org/apache/solr/schema/DatePointField.java index 8e4a8a022181..ea81ea30b68c 100644 --- a/solr/core/src/java/org/apache/solr/schema/DatePointField.java +++ b/solr/core/src/java/org/apache/solr/schema/DatePointField.java @@ -206,7 +206,7 @@ public SortField getSortField(SchemaField field, boolean top) { @Override public UninvertingReader.Type getUninversionType(SchemaField sf) { if (sf.multiValued()) { - return UninvertingReader.Type.SORTED_LONG; + return null; } else { return UninvertingReader.Type.LONG_POINT; } @@ -267,4 +267,4 @@ public String longToString(long val) { public long externalToLong(String extVal) { return DateMathParser.parseMath(null, extVal).getTime(); } -} \ No newline at end of file +} diff --git a/solr/core/src/java/org/apache/solr/schema/DoublePointField.java b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java index 24f40292147d..05a1ce7d1245 100644 --- a/solr/core/src/java/org/apache/solr/schema/DoublePointField.java +++ b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java @@ -151,7 +151,7 @@ public SortField getSortField(SchemaField field, boolean top) { @Override public Type getUninversionType(SchemaField sf) { if (sf.multiValued()) { - return Type.SORTED_DOUBLE; + return null; } else { return Type.DOUBLE_POINT; } diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java index 07eb866c7c46..3519283f1245 100644 --- a/solr/core/src/java/org/apache/solr/schema/FieldType.java +++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java @@ -453,7 +453,7 @@ public Query getPrefixQuery(QParser parser, SchemaField sf, String termStr) { } /** - * DocValues is not enabled for a field, but it's indexed, docvalues can be constructed + * If DocValues is not enabled for a field, but it's indexed, docvalues can be constructed * on the fly (uninverted, aka fieldcache) on the first request to sort, facet, etc. * This specifies the structure to use. 
* diff --git a/solr/core/src/java/org/apache/solr/schema/FloatPointField.java b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java index 46414aea3e5e..fe9c75338023 100644 --- a/solr/core/src/java/org/apache/solr/schema/FloatPointField.java +++ b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java @@ -151,7 +151,7 @@ public SortField getSortField(SchemaField field, boolean top) { @Override public Type getUninversionType(SchemaField sf) { if (sf.multiValued()) { - return Type.SORTED_FLOAT; + return null; } else { return Type.FLOAT_POINT; } diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java index 3de59eea3683..2ea63d4074b1 100644 --- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java +++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java @@ -46,7 +46,6 @@ import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiFields; @@ -375,12 +374,14 @@ public void refreshAnalyzers() { } public Map getUninversionMap(IndexReader reader) { - Map map = new HashMap<>(); + final Map map = new HashMap<>(); for (FieldInfo f : MultiFields.getMergedFieldInfos(reader)) { - if (f.getDocValuesType() == DocValuesType.NONE && f.getIndexOptions() != IndexOptions.NONE) { - SchemaField sf = getFieldOrNull(f.name); - if (sf != null) { - UninvertingReader.Type type = sf.getType().getUninversionType(sf); + if (f.getDocValuesType() == DocValuesType.NONE) { + // we have a field (of some kind) in the reader w/o DocValues + // if we have an equivilent indexed=true field in the schema, trust it's uninversion type (if any) + final SchemaField sf = getFieldOrNull(f.name); + if (sf != null && sf.indexed()) { + final UninvertingReader.Type type = sf.getType().getUninversionType(sf); if (type != null) { map.put(f.name, type); } diff --git a/solr/core/src/java/org/apache/solr/schema/IntPointField.java b/solr/core/src/java/org/apache/solr/schema/IntPointField.java index 66ac606c35e3..f47f45079e99 100644 --- a/solr/core/src/java/org/apache/solr/schema/IntPointField.java +++ b/solr/core/src/java/org/apache/solr/schema/IntPointField.java @@ -148,7 +148,7 @@ public SortField getSortField(SchemaField field, boolean top) { @Override public Type getUninversionType(SchemaField sf) { if (sf.multiValued()) { - return Type.SORTED_INTEGER; + return null; } else { return Type.INTEGER_POINT; } diff --git a/solr/core/src/java/org/apache/solr/schema/LongPointField.java b/solr/core/src/java/org/apache/solr/schema/LongPointField.java index c3bc6305d409..bef6c4723467 100644 --- a/solr/core/src/java/org/apache/solr/schema/LongPointField.java +++ b/solr/core/src/java/org/apache/solr/schema/LongPointField.java @@ -147,7 +147,7 @@ public SortField getSortField(SchemaField field, boolean top) { @Override public Type getUninversionType(SchemaField sf) { if (sf.multiValued()) { - return Type.SORTED_LONG; + return null; } else { return Type.LONG_POINT; } diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaField.java b/solr/core/src/java/org/apache/solr/schema/SchemaField.java index 1e18ee053dc5..c2e8cca8bab6 100644 --- a/solr/core/src/java/org/apache/solr/schema/SchemaField.java +++ b/solr/core/src/java/org/apache/solr/schema/SchemaField.java @@ -161,20 +161,17 @@ public 
SortField getSortField(boolean top) { * @see FieldType#getSortField */ public void checkSortability() throws SolrException { - if (! (indexed() || hasDocValues()) ) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - "can not sort on a field which is neither indexed nor has doc values: " - + getName()); - } if ( multiValued() ) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "can not sort on multivalued field: " + getName()); } - if (this.type.isPointField() && !hasDocValues()) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - "can not sort on a PointField without doc values: " - + getName()); + if (! hasDocValues() ) { + if ( ! ( indexed() && null != this.type.getUninversionType(this) ) ) { + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, + "can not sort on a field w/o docValues unless it is indexed and supports Uninversion: " + + getName()); + } } } @@ -187,22 +184,18 @@ public void checkSortability() throws SolrException { * @see FieldType#getValueSource */ public void checkFieldCacheSource() throws SolrException { - if (! (indexed() || hasDocValues()) ) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - "can not use FieldCache on a field which is neither indexed nor has doc values: " - + getName()); - } if ( multiValued() ) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "can not use FieldCache on multivalued field: " + getName()); } - if (this.type.isPointField() && !hasDocValues()) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - "Point fields can't use FieldCache. Use docValues=true for field: " - + getName()); + if (! hasDocValues() ) { + if ( ! ( indexed() && null != this.type.getUninversionType(this) ) ) { + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, + "can not use FieldCache on a field w/o docValues unless it is indexed and supports Uninversion: " + + getName()); + } } - } static SchemaField create(String name, FieldType ft, Map props) { @@ -343,6 +336,7 @@ public SimpleOrderedMap getNamedPropertyValues(boolean showDefaults) { properties.add(getPropertyName(OMIT_POSITIONS), omitPositions()); properties.add(getPropertyName(STORE_OFFSETS), storeOffsetsWithPositions()); properties.add(getPropertyName(MULTIVALUED), multiValued()); + properties.add(getPropertyName(LARGE_FIELD), isLarge()); if (sortMissingFirst()) { properties.add(getPropertyName(SORT_MISSING_FIRST), sortMissingFirst()); } else if (sortMissingLast()) { diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaManager.java b/solr/core/src/java/org/apache/solr/schema/SchemaManager.java index 7092c09e9608..1658e676418f 100644 --- a/solr/core/src/java/org/apache/solr/schema/SchemaManager.java +++ b/solr/core/src/java/org/apache/solr/schema/SchemaManager.java @@ -127,7 +127,7 @@ private List doOperations(List operations) throws InterruptedE latestVersion = ZkController.persistConfigResourceToZooKeeper (zkLoader, managedIndexSchema.getSchemaZkVersion(), managedIndexSchema.getResourceName(), sw.toString().getBytes(StandardCharsets.UTF_8), true); - req.getCore().getCoreDescriptor().getCoreContainer().reload(req.getCore().getName()); + req.getCore().getCoreContainer().reload(req.getCore().getName()); break; } catch (ZkController.ResourceModifiedInZkException e) { log.info("Schema was modified by another node. 
Retrying.."); diff --git a/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java b/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java index ee65fe84d6b7..e7194045095d 100644 --- a/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java +++ b/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java @@ -55,7 +55,7 @@ public ZkIndexSchemaReader(ManagedIndexSchemaFactory managedIndexSchemaFactory, solrCore.addCloseHook(new CloseHook() { @Override public void preClose(SolrCore core) { - CoreContainer cc = core.getCoreDescriptor().getCoreContainer(); + CoreContainer cc = core.getCoreContainer(); if (cc.isZooKeeperAware()) { log.debug("Removing ZkIndexSchemaReader OnReconnect listener as core "+core.getName()+" is shutting down."); ZkIndexSchemaReader.this.isRemoved = true; diff --git a/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java b/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java index 90fc7e92cc31..efd1c202229b 100644 --- a/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java +++ b/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java @@ -98,6 +98,7 @@ public DisMaxQParser(String qstr, SolrParams localParams, SolrParams params, Sol @Override public Query parse() throws SyntaxError { + parsed = true; SolrParams solrParams = SolrParams.wrapDefaults(localParams, params); @@ -265,6 +266,7 @@ protected SolrPluginUtils.DisjunctionMaxQueryParser getParser(Map IMPOSSIBLE_FIELD_NAME); parser.addAlias(IMPOSSIBLE_FIELD_NAME, tiebreaker, fields); parser.setPhraseSlop(slop); + parser.setSplitOnWhitespace(true); return parser; } diff --git a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java index 07d7e516394b..937f77e02557 100644 --- a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java +++ b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java @@ -1063,7 +1063,7 @@ protected Query getFieldQuery(String field, List queryTerms, boolean raw this.val = null; this.vals = queryTerms; this.slop = getPhraseSlop(); - return getAliasedMultiTermQuery(queryTerms); + return getAliasedMultiTermQuery(); } @Override @@ -1188,11 +1188,11 @@ protected Query getAliasedQuery() throws SyntaxError { * DisjunctionMaxQuery. (so yes: aliases which point at other * aliases should work) */ - protected Query getAliasedMultiTermQuery(List queryTerms) throws SyntaxError { + protected Query getAliasedMultiTermQuery() throws SyntaxError { Alias a = aliases.get(field); this.validateCyclicAliasing(field); if (a != null) { - List lst = getQueries(a); + List lst = getMultiTermQueries(a); if (lst == null || lst.size() == 0) { return getQuery(); } @@ -1203,14 +1203,25 @@ protected Query getAliasedMultiTermQuery(List queryTerms) throws SyntaxE // DisMaxQuery.rewrite() removes itself if there is just a single clause anyway. // if (lst.size()==1) return lst.get(0); if (makeDismax) { - if (lst.get(0) instanceof BooleanQuery && allSameQueryStructure(lst)) { + Query firstQuery = lst.get(0); + if ((firstQuery instanceof BooleanQuery + || (firstQuery instanceof BoostQuery && ((BoostQuery)firstQuery).getQuery() instanceof BooleanQuery)) + && allSameQueryStructure(lst)) { BooleanQuery.Builder q = new BooleanQuery.Builder(); List subs = new ArrayList<>(lst.size()); - for (int c = 0 ; c < ((BooleanQuery)lst.get(0)).clauses().size() ; ++c) { + BooleanQuery firstBooleanQuery = firstQuery instanceof BoostQuery + ? 
(BooleanQuery)((BoostQuery)firstQuery).getQuery() : (BooleanQuery)firstQuery; + for (int c = 0 ; c < firstBooleanQuery.clauses().size() ; ++c) { subs.clear(); // Make a dismax query for each clause position in the boolean per-field queries. for (int n = 0 ; n < lst.size() ; ++n) { - subs.add(((BooleanQuery)lst.get(n)).clauses().get(c).getQuery()); + if (lst.get(n) instanceof BoostQuery) { + BoostQuery boostQuery = (BoostQuery)lst.get(n); + BooleanQuery booleanQuery = (BooleanQuery)boostQuery.getQuery(); + subs.add(new BoostQuery(booleanQuery.clauses().get(c).getQuery(), boostQuery.getBoost())); + } else { + subs.add(((BooleanQuery)lst.get(n)).clauses().get(c).getQuery()); + } } q.add(newBooleanClause(new DisjunctionMaxQuery(subs, a.tie), BooleanClause.Occur.SHOULD)); } @@ -1239,12 +1250,21 @@ protected Query getAliasedMultiTermQuery(List queryTerms) throws SyntaxE } } - /** Recursively examines the given query list for identical structure in all queries. */ + /** + * Recursively examines the given query list for identical structure in all queries. + * Boosts on BoostQuery-s are ignored, and the contained queries are instead used as the basis for comparison. + **/ private boolean allSameQueryStructure(List lst) { boolean allSame = true; Query firstQuery = lst.get(0); + if (firstQuery instanceof BoostQuery) { + firstQuery = ((BoostQuery)firstQuery).getQuery(); // ignore boost; compare contained query + } for (int n = 1 ; n < lst.size(); ++n) { Query nthQuery = lst.get(n); + if (nthQuery instanceof BoostQuery) { + nthQuery = ((BoostQuery)nthQuery).getQuery(); + } if (nthQuery.getClass() != firstQuery.getClass()) { allSame = false; break; @@ -1350,7 +1370,26 @@ protected List getQueries(Alias a) throws SyntaxError { } return lst; } - + + protected List getMultiTermQueries(Alias a) throws SyntaxError { + if (a == null) return null; + if (a.fields.size()==0) return null; + List lst= new ArrayList<>(4); + + for (String f : a.fields.keySet()) { + this.field = f; + Query sub = getAliasedMultiTermQuery(); + if (sub != null) { + Float boost = a.fields.get(f); + if (boost != null && boost.floatValue() != 1f) { + sub = new BoostQuery(sub, boost); + } + lst.add(sub); + } + } + return lst; + } + private Query getQuery() { try { diff --git a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java index 9c4b8920aab3..cb699b25abc4 100644 --- a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java +++ b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java @@ -15,15 +15,17 @@ * limitations under the License. 
*/ package org.apache.solr.search; + +import com.codahale.metrics.MetricRegistry; import org.apache.solr.common.SolrException; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.util.ConcurrentLRUCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; -import java.io.Serializable; import java.lang.invoke.MethodHandles; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -57,6 +59,10 @@ public class FastLRUCache extends SolrCacheBase implements SolrCache private long maxRamBytes; + private MetricsMap cacheMap; + private Set metricNames = new HashSet<>(); + private MetricRegistry registry; + @Override public Object init(Map args, Object persistence, CacheRegenerator regenerator) { super.init(args, regenerator); @@ -215,68 +221,80 @@ public String getDescription() { } @Override - public String getSource() { - return null; + public Set getMetricNames() { + return metricNames; } - @Override - public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap<>(); - if (cache == null) return lst; - ConcurrentLRUCache.Stats stats = cache.getStats(); - long lookups = stats.getCumulativeLookups(); - long hits = stats.getCumulativeHits(); - long inserts = stats.getCumulativePuts(); - long evictions = stats.getCumulativeEvictions(); - long size = stats.getCurrentSize(); - long clookups = 0; - long chits = 0; - long cinserts = 0; - long cevictions = 0; - - // NOTE: It is safe to iterate on a CopyOnWriteArrayList - for (ConcurrentLRUCache.Stats statistiscs : statsList) { - clookups += statistiscs.getCumulativeLookups(); - chits += statistiscs.getCumulativeHits(); - cinserts += statistiscs.getCumulativePuts(); - cevictions += statistiscs.getCumulativeEvictions(); - } + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + cacheMap = new MetricsMap((detailed, map) -> { + if (cache != null) { + ConcurrentLRUCache.Stats stats = cache.getStats(); + long lookups = stats.getCumulativeLookups(); + long hits = stats.getCumulativeHits(); + long inserts = stats.getCumulativePuts(); + long evictions = stats.getCumulativeEvictions(); + long size = stats.getCurrentSize(); + long clookups = 0; + long chits = 0; + long cinserts = 0; + long cevictions = 0; + + // NOTE: It is safe to iterate on a CopyOnWriteArrayList + for (ConcurrentLRUCache.Stats statistiscs : statsList) { + clookups += statistiscs.getCumulativeLookups(); + chits += statistiscs.getCumulativeHits(); + cinserts += statistiscs.getCumulativePuts(); + cevictions += statistiscs.getCumulativeEvictions(); + } + + map.put("lookups", lookups); + map.put("hits", hits); + map.put("hitratio", calcHitRatio(lookups, hits)); + map.put("inserts", inserts); + map.put("evictions", evictions); + map.put("size", size); + + map.put("warmupTime", warmupTime); + map.put("cumulative_lookups", clookups); + map.put("cumulative_hits", chits); + map.put("cumulative_hitratio", calcHitRatio(clookups, chits)); + map.put("cumulative_inserts", cinserts); + map.put("cumulative_evictions", cevictions); + + if (detailed && showItems != 0) { + Map items = cache.getLatestAccessedItems( showItems == -1 ? 
Integer.MAX_VALUE : showItems ); + for (Map.Entry e : (Set )items.entrySet()) { + Object k = e.getKey(); + Object v = e.getValue(); + + String ks = "item_" + k; + String vs = v.toString(); + map.put(ks,vs); + } - lst.add("lookups", lookups); - lst.add("hits", hits); - lst.add("hitratio", calcHitRatio(lookups, hits)); - lst.add("inserts", inserts); - lst.add("evictions", evictions); - lst.add("size", size); - - lst.add("warmupTime", warmupTime); - lst.add("cumulative_lookups", clookups); - lst.add("cumulative_hits", chits); - lst.add("cumulative_hitratio", calcHitRatio(clookups, chits)); - lst.add("cumulative_inserts", cinserts); - lst.add("cumulative_evictions", cevictions); - - if (showItems != 0) { - Map items = cache.getLatestAccessedItems( showItems == -1 ? Integer.MAX_VALUE : showItems ); - for (Map.Entry e : (Set )items.entrySet()) { - Object k = e.getKey(); - Object v = e.getValue(); - - String ks = "item_" + k; - String vs = v.toString(); - lst.add(ks,vs); + } } - - } + }); + manager.registerGauge(this, registryName, cacheMap, true, scope, getCategory().toString()); + } + + // for unit tests only + MetricsMap getMetricsMap() { + return cacheMap; + } - return lst; + @Override + public MetricRegistry getMetricRegistry() { + return registry; } @Override public String toString() { - return name() + getStatistics().toString(); + return name() + (cacheMap != null ? cacheMap.getValue().toString() : ""); } + } diff --git a/solr/core/src/java/org/apache/solr/search/Grouping.java b/solr/core/src/java/org/apache/solr/search/Grouping.java index 302383a00a7e..eeb6b66f82dd 100644 --- a/solr/core/src/java/org/apache/solr/search/Grouping.java +++ b/solr/core/src/java/org/apache/solr/search/Grouping.java @@ -47,17 +47,14 @@ import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.grouping.AllGroupHeadsCollector; +import org.apache.lucene.search.grouping.AllGroupsCollector; +import org.apache.lucene.search.grouping.FirstPassGroupingCollector; import org.apache.lucene.search.grouping.GroupDocs; import org.apache.lucene.search.grouping.SearchGroup; +import org.apache.lucene.search.grouping.TermGroupSelector; import org.apache.lucene.search.grouping.TopGroups; -import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector; -import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector; -import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector; -import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupsCollector; -import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector; -import org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector; +import org.apache.lucene.search.grouping.TopGroupsCollector; +import org.apache.lucene.search.grouping.ValueSourceGroupSelector; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.mutable.MutableValue; import org.apache.solr.common.SolrException; @@ -700,10 +697,10 @@ private float maxAvoidNaN(float valA, float valB) { public class CommandField extends Command { public String groupBy; - TermFirstPassGroupingCollector firstPass; - TermSecondPassGroupingCollector secondPass; + FirstPassGroupingCollector firstPass; + TopGroupsCollector secondPass; - TermAllGroupsCollector allGroupsCollector; + AllGroupsCollector
allGroupsCollector; // If offset falls outside the number of documents a group can provide use this collector instead of secondPass TotalHitCountCollector fallBackCollector; @@ -729,7 +726,7 @@ protected Collector createFirstPassCollector() throws IOException { } groupSort = groupSort == null ? Sort.RELEVANCE : groupSort; - firstPass = new TermFirstPassGroupingCollector(groupBy, groupSort, actualGroupsToFind); + firstPass = new FirstPassGroupingCollector<>(new TermGroupSelector(groupBy), groupSort, actualGroupsToFind); return firstPass; } @@ -739,14 +736,14 @@ protected Collector createFirstPassCollector() throws IOException { @Override protected Collector createSecondPassCollector() throws IOException { if (actualGroupsToFind <= 0) { - allGroupsCollector = new TermAllGroupsCollector(groupBy); + allGroupsCollector = new AllGroupsCollector<>(new TermGroupSelector(groupBy)); return totalCount == TotalCount.grouped ? allGroupsCollector : null; } topGroups = format == Format.grouped ? firstPass.getTopGroups(offset, false) : firstPass.getTopGroups(0, false); if (topGroups == null) { if (totalCount == TotalCount.grouped) { - allGroupsCollector = new TermAllGroupsCollector(groupBy); + allGroupsCollector = new AllGroupsCollector<>(new TermGroupSelector(groupBy)); fallBackCollector = new TotalHitCountCollector(); return MultiCollector.wrap(allGroupsCollector, fallBackCollector); } else { @@ -758,12 +755,12 @@ protected Collector createSecondPassCollector() throws IOException { int groupedDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc); groupedDocsToCollect = Math.max(groupedDocsToCollect, 1); Sort withinGroupSort = this.withinGroupSort != null ? this.withinGroupSort : Sort.RELEVANCE; - secondPass = new TermSecondPassGroupingCollector( - groupBy, topGroups, groupSort, withinGroupSort, groupedDocsToCollect, needScores, needScores, false + secondPass = new TopGroupsCollector<>(new TermGroupSelector(groupBy), + topGroups, groupSort, withinGroupSort, groupedDocsToCollect, needScores, needScores, false ); if (totalCount == TotalCount.grouped) { - allGroupsCollector = new TermAllGroupsCollector(groupBy); + allGroupsCollector = new AllGroupsCollector<>(new TermGroupSelector(groupBy)); return MultiCollector.wrap(secondPass, allGroupsCollector); } else { return secondPass; @@ -776,7 +773,7 @@ protected Collector createSecondPassCollector() throws IOException { @Override public AllGroupHeadsCollector createAllGroupCollector() throws IOException { Sort sortWithinGroup = withinGroupSort != null ? withinGroupSort : Sort.RELEVANCE; - return TermAllGroupHeadsCollector.create(groupBy, sortWithinGroup); + return AllGroupHeadsCollector.newCollector(new TermGroupSelector(groupBy), sortWithinGroup); } /** @@ -921,11 +918,15 @@ public class CommandFunc extends Command { public ValueSource groupBy; Map context; - FunctionFirstPassGroupingCollector firstPass; - FunctionSecondPassGroupingCollector secondPass; + private ValueSourceGroupSelector newSelector() { + return new ValueSourceGroupSelector(groupBy, context); + } + + FirstPassGroupingCollector firstPass; + TopGroupsCollector secondPass; // If offset falls outside the number of documents a group can provide use this collector instead of secondPass TotalHitCountCollector fallBackCollector; - FunctionAllGroupsCollector allGroupsCollector; + AllGroupsCollector allGroupsCollector; Collection> topGroups; /** @@ -950,7 +951,7 @@ protected Collector createFirstPassCollector() throws IOException { } groupSort = groupSort == null ? 
Sort.RELEVANCE : groupSort; - firstPass = new FunctionFirstPassGroupingCollector(groupBy, context, searcher.weightSort(groupSort), actualGroupsToFind); + firstPass = new FirstPassGroupingCollector<>(newSelector(), searcher.weightSort(groupSort), actualGroupsToFind); return firstPass; } @@ -960,14 +961,14 @@ protected Collector createFirstPassCollector() throws IOException { @Override protected Collector createSecondPassCollector() throws IOException { if (actualGroupsToFind <= 0) { - allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context); + allGroupsCollector = new AllGroupsCollector<>(newSelector()); return totalCount == TotalCount.grouped ? allGroupsCollector : null; } topGroups = format == Format.grouped ? firstPass.getTopGroups(offset, false) : firstPass.getTopGroups(0, false); if (topGroups == null) { if (totalCount == TotalCount.grouped) { - allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context); + allGroupsCollector = new AllGroupsCollector<>(newSelector()); fallBackCollector = new TotalHitCountCollector(); return MultiCollector.wrap(allGroupsCollector, fallBackCollector); } else { @@ -979,12 +980,12 @@ protected Collector createSecondPassCollector() throws IOException { int groupdDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc); groupdDocsToCollect = Math.max(groupdDocsToCollect, 1); Sort withinGroupSort = this.withinGroupSort != null ? this.withinGroupSort : Sort.RELEVANCE; - secondPass = new FunctionSecondPassGroupingCollector( - topGroups, groupSort, withinGroupSort, groupdDocsToCollect, needScores, needScores, false, groupBy, context + secondPass = new TopGroupsCollector<>(newSelector(), + topGroups, groupSort, withinGroupSort, groupdDocsToCollect, needScores, needScores, false ); if (totalCount == TotalCount.grouped) { - allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context); + allGroupsCollector = new AllGroupsCollector<>(newSelector()); return MultiCollector.wrap(secondPass, allGroupsCollector); } else { return secondPass; @@ -994,7 +995,7 @@ protected Collector createSecondPassCollector() throws IOException { @Override public AllGroupHeadsCollector createAllGroupCollector() throws IOException { Sort sortWithinGroup = withinGroupSort != null ? 
withinGroupSort : Sort.RELEVANCE; - return new FunctionAllGroupHeadsCollector(groupBy, context, sortWithinGroup); + return AllGroupHeadsCollector.newCollector(newSelector(), sortWithinGroup); } /** diff --git a/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java index bf64060aea1e..fca9a341119d 100644 --- a/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java +++ b/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java @@ -84,7 +84,7 @@ Query parseJoin() throws SyntaxError { long fromCoreOpenTime = 0; if (fromIndex != null && !fromIndex.equals(req.getCore().getCoreDescriptor().getName()) ) { - CoreContainer container = req.getCore().getCoreDescriptor().getCoreContainer(); + CoreContainer container = req.getCore().getCoreContainer(); // if in SolrCloud mode, fromIndex should be the name of a single-sharded collection coreName = ScoreJoinQParserPlugin.getCoreName(fromIndex, container); @@ -173,7 +173,7 @@ public JoinQueryWeight(SolrIndexSearcher searcher, float boost) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cross-core join must have SolrRequestInfo"); } - CoreContainer container = searcher.getCore().getCoreDescriptor().getCoreContainer(); + CoreContainer container = searcher.getCore().getCoreContainer(); final SolrCore fromCore = container.getCore(fromIndex); if (fromCore == null) { diff --git a/solr/core/src/java/org/apache/solr/search/LFUCache.java b/solr/core/src/java/org/apache/solr/search/LFUCache.java index 2b593c6f57ad..82ba6d265364 100644 --- a/solr/core/src/java/org/apache/solr/search/LFUCache.java +++ b/solr/core/src/java/org/apache/solr/search/LFUCache.java @@ -15,19 +15,19 @@ * limitations under the License. */ package org.apache.solr.search; -import java.io.Serializable; + import java.lang.invoke.MethodHandles; -import java.net.URL; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; +import com.codahale.metrics.MetricRegistry; import org.apache.solr.common.SolrException; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; -import org.apache.solr.core.SolrCore; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.util.ConcurrentLFUCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,6 +64,9 @@ public class LFUCache implements SolrCache { private ConcurrentLFUCache cache; private int showItems = 0; private Boolean timeDecay = true; + private MetricsMap cacheMap; + private Set metricNames = new HashSet<>(); + private MetricRegistry registry; @Override public Object init(Map args, Object persistence, CacheRegenerator regenerator) { @@ -211,11 +214,6 @@ public String getName() { return LFUCache.class.getName(); } - @Override - public String getVersion() { - return SolrCore.version; - } - @Override public String getDescription() { return description; @@ -226,16 +224,6 @@ public Category getCategory() { return Category.CACHE; } - @Override - public String getSource() { - return null; - } - - @Override - public URL[] getDocs() { - return null; - } - // returns a ratio, not a percent. 
private static String calcHitRatio(long lookups, long hits) { if (lookups == 0) return "0.00"; @@ -246,62 +234,81 @@ private static String calcHitRatio(long lookups, long hits) { } @Override - public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap<>(); - if (cache == null) return lst; - ConcurrentLFUCache.Stats stats = cache.getStats(); - long lookups = stats.getCumulativeLookups(); - long hits = stats.getCumulativeHits(); - long inserts = stats.getCumulativePuts(); - long evictions = stats.getCumulativeEvictions(); - long size = stats.getCurrentSize(); - - lst.add("lookups", lookups); - lst.add("hits", hits); - lst.add("hitratio", calcHitRatio(lookups, hits)); - lst.add("inserts", inserts); - lst.add("evictions", evictions); - lst.add("size", size); - - lst.add("warmupTime", warmupTime); - lst.add("timeDecay", timeDecay); - - long clookups = 0; - long chits = 0; - long cinserts = 0; - long cevictions = 0; - - // NOTE: It is safe to iterate on a CopyOnWriteArrayList - for (ConcurrentLFUCache.Stats statistics : statsList) { - clookups += statistics.getCumulativeLookups(); - chits += statistics.getCumulativeHits(); - cinserts += statistics.getCumulativePuts(); - cevictions += statistics.getCumulativeEvictions(); - } - lst.add("cumulative_lookups", clookups); - lst.add("cumulative_hits", chits); - lst.add("cumulative_hitratio", calcHitRatio(clookups, chits)); - lst.add("cumulative_inserts", cinserts); - lst.add("cumulative_evictions", cevictions); - - if (showItems != 0) { - Map items = cache.getMostUsedItems(showItems == -1 ? Integer.MAX_VALUE : showItems); - for (Map.Entry e : (Set) items.entrySet()) { - Object k = e.getKey(); - Object v = e.getValue(); - - String ks = "item_" + k; - String vs = v.toString(); - lst.add(ks, vs); + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + cacheMap = new MetricsMap((detailed, map) -> { + if (cache != null) { + ConcurrentLFUCache.Stats stats = cache.getStats(); + long lookups = stats.getCumulativeLookups(); + long hits = stats.getCumulativeHits(); + long inserts = stats.getCumulativePuts(); + long evictions = stats.getCumulativeEvictions(); + long size = stats.getCurrentSize(); + + map.put("lookups", lookups); + map.put("hits", hits); + map.put("hitratio", calcHitRatio(lookups, hits)); + map.put("inserts", inserts); + map.put("evictions", evictions); + map.put("size", size); + + map.put("warmupTime", warmupTime); + map.put("timeDecay", timeDecay); + + long clookups = 0; + long chits = 0; + long cinserts = 0; + long cevictions = 0; + + // NOTE: It is safe to iterate on a CopyOnWriteArrayList + for (ConcurrentLFUCache.Stats statistics : statsList) { + clookups += statistics.getCumulativeLookups(); + chits += statistics.getCumulativeHits(); + cinserts += statistics.getCumulativePuts(); + cevictions += statistics.getCumulativeEvictions(); + } + map.put("cumulative_lookups", clookups); + map.put("cumulative_hits", chits); + map.put("cumulative_hitratio", calcHitRatio(clookups, chits)); + map.put("cumulative_inserts", cinserts); + map.put("cumulative_evictions", cevictions); + + if (detailed && showItems != 0) { + Map items = cache.getMostUsedItems(showItems == -1 ? 
Integer.MAX_VALUE : showItems); + for (Map.Entry e : (Set) items.entrySet()) { + Object k = e.getKey(); + Object v = e.getValue(); + + String ks = "item_" + k; + String vs = v.toString(); + map.put(ks, vs); + } + + } + } + }); + manager.registerGauge(this, registryName, cacheMap, true, scope, getCategory().toString()); + } - } + // for unit tests only + MetricsMap getMetricsMap() { + return cacheMap; + } - return lst; + @Override + public Set getMetricNames() { + return metricNames; + } + + @Override + public MetricRegistry getMetricRegistry() { + return registry; } @Override public String toString() { - return name + getStatistics().toString(); + return name + (cacheMap != null ? cacheMap.getValue().toString() : ""); } + } diff --git a/solr/core/src/java/org/apache/solr/search/LRUCache.java b/solr/core/src/java/org/apache/solr/search/LRUCache.java index b178fb21b1fe..ce206fe2f7e3 100644 --- a/solr/core/src/java/org/apache/solr/search/LRUCache.java +++ b/solr/core/src/java/org/apache/solr/search/LRUCache.java @@ -19,18 +19,21 @@ import java.lang.invoke.MethodHandles; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; +import com.codahale.metrics.MetricRegistry; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.RamUsageEstimator; import org.apache.solr.common.SolrException; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,6 +58,7 @@ public class LRUCache extends SolrCacheBase implements SolrCache, Acco static final long LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY = HASHTABLE_RAM_BYTES_PER_ENTRY + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF; // previous & next references + /// End copied code /* An instance of this class will be shared across multiple instances @@ -82,6 +86,9 @@ private static class CumulativeStats { private Map map; private String description="LRU Cache"; + private MetricsMap cacheMap; + private Set metricNames = new HashSet<>(); + private MetricRegistry registry; private long maxRamBytes = Long.MAX_VALUE; // The synchronization used for the map will be used to update this, @@ -319,45 +326,56 @@ public String getDescription() { } @Override - public String getSource() { - return null; + public Set getMetricNames() { + return metricNames; } @Override - public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap(); - synchronized (map) { - lst.add("lookups", lookups); - lst.add("hits", hits); - lst.add("hitratio", calcHitRatio(lookups,hits)); - lst.add("inserts", inserts); - lst.add("evictions", evictions); - lst.add("size", map.size()); + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + cacheMap = new MetricsMap((detailed, res) -> { + synchronized (map) { + res.put("lookups", lookups); + res.put("hits", hits); + res.put("hitratio", calcHitRatio(lookups,hits)); + res.put("inserts", inserts); + res.put("evictions", evictions); + res.put("size", map.size()); + if (maxRamBytes != Long.MAX_VALUE) { + res.put("maxRamMB", maxRamBytes / 1024L / 1024L); + res.put("ramBytesUsed", ramBytesUsed()); + 
res.put("evictionsRamUsage", evictionsRamUsage); + } + } + res.put("warmupTime", warmupTime); + + long clookups = stats.lookups.longValue(); + long chits = stats.hits.longValue(); + res.put("cumulative_lookups", clookups); + res.put("cumulative_hits", chits); + res.put("cumulative_hitratio", calcHitRatio(clookups, chits)); + res.put("cumulative_inserts", stats.inserts.longValue()); + res.put("cumulative_evictions", stats.evictions.longValue()); if (maxRamBytes != Long.MAX_VALUE) { - lst.add("maxRamMB", maxRamBytes / 1024L / 1024L); - lst.add("ramBytesUsed", ramBytesUsed()); - lst.add("evictionsRamUsage", evictionsRamUsage); + res.put("cumulative_evictionsRamUsage", stats.evictionsRamUsage.longValue()); } - } - lst.add("warmupTime", warmupTime); - - long clookups = stats.lookups.longValue(); - long chits = stats.hits.longValue(); - lst.add("cumulative_lookups", clookups); - lst.add("cumulative_hits", chits); - lst.add("cumulative_hitratio", calcHitRatio(clookups, chits)); - lst.add("cumulative_inserts", stats.inserts.longValue()); - lst.add("cumulative_evictions", stats.evictions.longValue()); - if (maxRamBytes != Long.MAX_VALUE) { - lst.add("cumulative_evictionsRamUsage", stats.evictionsRamUsage.longValue()); - } - - return lst; + }); + manager.registerGauge(this, registryName, cacheMap, true, scope, getCategory().toString()); + } + + // for unit tests only + MetricsMap getMetricsMap() { + return cacheMap; + } + + @Override + public MetricRegistry getMetricRegistry() { + return registry; } @Override public String toString() { - return name() + getStatistics().toString(); + return name() + cacheMap != null ? cacheMap.getValue().toString() : ""; } @Override diff --git a/solr/core/src/java/org/apache/solr/search/QParserPlugin.java b/solr/core/src/java/org/apache/solr/search/QParserPlugin.java index 34089d201a02..872c618afaab 100644 --- a/solr/core/src/java/org/apache/solr/search/QParserPlugin.java +++ b/solr/core/src/java/org/apache/solr/search/QParserPlugin.java @@ -16,14 +16,14 @@ */ package org.apache.solr.search; -import java.net.URL; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.NamedList; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.search.join.BlockJoinChildQParserPlugin; import org.apache.solr.search.join.BlockJoinParentQParserPlugin; @@ -31,7 +31,7 @@ import org.apache.solr.search.mlt.MLTQParserPlugin; import org.apache.solr.util.plugin.NamedListInitializedPlugin; -public abstract class QParserPlugin implements NamedListInitializedPlugin, SolrInfoMBean { +public abstract class QParserPlugin implements NamedListInitializedPlugin, SolrInfoBean { /** internal use - name of the default parser */ public static final String DEFAULT_QTYPE = LuceneQParserPlugin.NAME; @@ -98,11 +98,6 @@ public String getName() { return this.getClass().getName(); } - @Override - public String getVersion() { - return null; - } - @Override public String getDescription() { return ""; // UI required non-null to work @@ -114,19 +109,10 @@ public Category getCategory() { } @Override - public String getSource() { + public Set getMetricNames() { return null; } - @Override - public URL[] getDocs() { - return new URL[0]; - } - - @Override - public NamedList getStatistics() { - return null; - } } diff --git a/solr/core/src/java/org/apache/solr/search/QueryParsing.java 
b/solr/core/src/java/org/apache/solr/search/QueryParsing.java index 692de1a70979..da16d86b6e8e 100644 --- a/solr/core/src/java/org/apache/solr/search/QueryParsing.java +++ b/solr/core/src/java/org/apache/solr/search/QueryParsing.java @@ -171,33 +171,6 @@ public static int parseLocalParams(String txt, int start, ModifiableSolrParams t } - public static String encodeLocalParamVal(String val) { - int len = val.length(); - int i = 0; - if (len > 0 && val.charAt(0) != '$') { - for (;i=len) return val; - - // We need to enclose in quotes... but now we need to escape - StringBuilder sb = new StringBuilder(val.length() + 4); - sb.append('\''); - for (i=0; i extends SolrInfoMBean { +public interface SolrCache extends SolrInfoBean, SolrMetricProducer { /** * The initialization routine. Instance specific arguments are passed in diff --git a/solr/core/src/java/org/apache/solr/search/SolrCacheBase.java b/solr/core/src/java/org/apache/solr/search/SolrCacheBase.java index 85caa90cfeec..c388d5480361 100644 --- a/solr/core/src/java/org/apache/solr/search/SolrCacheBase.java +++ b/solr/core/src/java/org/apache/solr/search/SolrCacheBase.java @@ -18,11 +18,10 @@ import java.math.BigDecimal; import java.math.RoundingMode; -import java.net.URL; import java.util.Map; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean.Category; +import org.apache.solr.core.SolrInfoBean.Category; import org.apache.solr.search.SolrCache.State; import static org.apache.solr.common.params.CommonParams.NAME; @@ -106,10 +105,6 @@ public Category getCategory() { return Category.CACHE; } - public URL[] getDocs() { - return null; - } - public void init(Map args, CacheRegenerator regenerator) { this.regenerator = regenerator; state = State.CREATED; diff --git a/solr/core/src/java/org/apache/solr/search/SolrFieldCacheBean.java b/solr/core/src/java/org/apache/solr/search/SolrFieldCacheBean.java new file mode 100644 index 000000000000..ffcc37d64cf0 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/search/SolrFieldCacheBean.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.search; + +import java.util.HashSet; +import java.util.Set; + +import com.codahale.metrics.MetricRegistry; +import org.apache.solr.core.SolrInfoBean; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; +import org.apache.solr.uninverting.UninvertingReader; + +/** + * A SolrInfoBean that provides introspection of the Solr FieldCache + * + */ +public class SolrFieldCacheBean implements SolrInfoBean, SolrMetricProducer { + + private boolean disableEntryList = Boolean.getBoolean("disableSolrFieldCacheMBeanEntryList"); + private boolean disableJmxEntryList = Boolean.getBoolean("disableSolrFieldCacheMBeanEntryListJmx"); + + private MetricRegistry registry; + private Set metricNames = new HashSet<>(); + + @Override + public String getName() { return this.getClass().getName(); } + @Override + public String getDescription() { + return "Provides introspection of the Solr FieldCache "; + } + @Override + public Category getCategory() { return Category.CACHE; } + @Override + public Set getMetricNames() { + return metricNames; + } + @Override + public MetricRegistry getMetricRegistry() { + return registry; + } + + @Override + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + MetricsMap metricsMap = new MetricsMap((detailed, map) -> { + if (detailed && !disableEntryList && !disableJmxEntryList) { + UninvertingReader.FieldCacheStats fieldCacheStats = UninvertingReader.getUninvertedStats(); + String[] entries = fieldCacheStats.info; + map.put("entries_count", entries.length); + map.put("total_size", fieldCacheStats.totalSize); + for (int i = 0; i < entries.length; i++) { + final String entry = entries[i]; + map.put("entry#" + i, entry); + } + } else { + map.put("entries_count", UninvertingReader.getUninvertedStatsSize()); + } + }); + manager.register(this, registryName, metricsMap, true, "fieldCache", Category.CACHE.toString(), scope); + } +} diff --git a/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java b/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java deleted file mode 100644 index 642b7087846d..000000000000 --- a/solr/core/src/java/org/apache/solr/search/SolrFieldCacheMBean.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.solr.search; - -import java.net.URL; - -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; -import org.apache.solr.core.JmxMonitoredMap.JmxAugmentedSolrInfoMBean; -import org.apache.solr.core.SolrCore; -import org.apache.solr.uninverting.UninvertingReader; - -/** - * A SolrInfoMBean that provides introspection of the Solr FieldCache - * - */ -public class SolrFieldCacheMBean implements JmxAugmentedSolrInfoMBean { - - private boolean disableEntryList = Boolean.getBoolean("disableSolrFieldCacheMBeanEntryList"); - private boolean disableJmxEntryList = Boolean.getBoolean("disableSolrFieldCacheMBeanEntryListJmx"); - - @Override - public String getName() { return this.getClass().getName(); } - @Override - public String getVersion() { return SolrCore.version; } - @Override - public String getDescription() { - return "Provides introspection of the Solr FieldCache "; - } - @Override - public Category getCategory() { return Category.CACHE; } - @Override - public String getSource() { return null; } - @Override - public URL[] getDocs() { - return null; - } - @Override - public NamedList getStatistics() { - return getStats(!disableEntryList); - } - - @Override - public NamedList getStatisticsForJmx() { - return getStats(!disableEntryList && !disableJmxEntryList); - } - - private NamedList getStats(boolean listEntries) { - NamedList stats = new SimpleOrderedMap(); - if (listEntries) { - UninvertingReader.FieldCacheStats fieldCacheStats = UninvertingReader.getUninvertedStats(); - String[] entries = fieldCacheStats.info; - stats.add("entries_count", entries.length); - stats.add("total_size", fieldCacheStats.totalSize); - for (int i = 0; i < entries.length; i++) { - stats.add("entry#" + i, entries[i]); - } - } else { - stats.add("entries_count", UninvertingReader.getUninvertedStatsSize()); - } - return stats; - } - -} diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java index 4b2e0f714ae6..900267ce4794 100644 --- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java +++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java @@ -19,13 +19,13 @@ import java.io.Closeable; import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import com.codahale.metrics.MetricRegistry; import com.google.common.collect.Iterables; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -58,15 +59,15 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.ObjectReleaseTracker; -import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.core.DirectoryFactory; import org.apache.solr.core.DirectoryFactory.DirContext; import org.apache.solr.core.SolrConfig; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import 
org.apache.solr.index.SlowCompositeReaderWrapper; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestInfo; @@ -86,7 +87,7 @@ * * @since solr 0.9 */ -public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrInfoMBean { +public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrInfoBean, SolrMetricProducer { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -136,7 +137,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI private final String path; private boolean releaseDirectory; - private final NamedList readerStats; + private Set metricNames = new HashSet<>(); private static DirectoryReader getReader(SolrCore core, SolrIndexConfig config, DirectoryFactory directoryFactory, String path) throws IOException { @@ -154,10 +155,25 @@ private static DirectoryReader getReader(SolrCore core, SolrIndexConfig config, private static DirectoryReader wrapReader(SolrCore core, DirectoryReader reader) throws IOException { assert reader != null; return ExitableDirectoryReader.wrap( - UninvertingReader.wrap(reader, core.getLatestSchema().getUninversionMap(reader)), + wrapUninvertingReaderPerSegment(core, reader), SolrQueryTimeoutImpl.getInstance()); } + /** + * If docvalues are enabled or disabled after data has already been indexed for a field, such that + * only some segments have docvalues, uninverting on the top level reader will cause + * IllegalStateException to be thrown when trying to use a field with such mixed data. This is because + * the {@link IndexSchema#getUninversionMap(IndexReader)} method decides to put a field + * into the uninverteding map only if *NO* segment in the index contains docvalues for that field. + * + * Therefore, this class provides a uninverting map per segment such that for any field, + * DocValues are used from segments if they exist and uninversion of the field is performed on the rest + * of the segments. + */ + private static DirectoryReader wrapUninvertingReaderPerSegment(SolrCore core, DirectoryReader reader) throws IOException { + return UninvertingReader.wrap(reader, r -> core.getLatestSchema().getUninversionMap(r)); + } + /** * Builds the necessary collector chain (via delegate wrapping) and executes the query against it. 
This method takes * into consideration both the explicitly provided collector and postFilter as well as any needed collector wrappers @@ -302,7 +318,6 @@ public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, String // We already have our own filter cache setQueryCache(null); - readerStats = snapStatistics(reader); // do this at the end since an exception in the constructor means we won't close numOpens.incrementAndGet(); assert ObjectReleaseTracker.track(this); @@ -404,10 +419,10 @@ public final DirectoryReader getIndexReader() { } /** - * Register sub-objects such as caches + * Register sub-objects such as caches and our own metrics */ public void register() { - final Map infoRegistry = core.getInfoRegistry(); + final Map infoRegistry = core.getInfoRegistry(); // register self infoRegistry.put(STATISTICS_KEY, this); infoRegistry.put(name, this); @@ -415,6 +430,12 @@ public void register() { cache.setState(SolrCache.State.LIVE); infoRegistry.put(cache.name(), cache); } + SolrMetricManager manager = core.getCoreContainer().getMetricManager(); + String registry = core.getCoreMetricManager().getRegistryName(); + for (SolrCache cache : cacheList) { + cache.initializeMetrics(manager, registry, SolrMetricManager.mkName(cache.name(), STATISTICS_KEY)); + } + initializeMetrics(manager, registry, STATISTICS_KEY); registerTime = new Date(); } @@ -2186,7 +2207,7 @@ public IndexFingerprint getIndexFingerprint(long maxVersion) throws IOException ///////////////////////////////////////////////////////////////////// - // SolrInfoMBean stuff: Statistics and Module Info + // SolrInfoBean stuff: Statistics and Module Info ///////////////////////////////////////////////////////////////////// @Override @@ -2194,11 +2215,6 @@ public String getName() { return SolrIndexSearcher.class.getName(); } - @Override - public String getVersion() { - return SolrCore.version; - } - @Override public String getDescription() { return "index searcher"; @@ -2210,38 +2226,31 @@ public Category getCategory() { } @Override - public String getSource() { - return null; + public Set getMetricNames() { + return metricNames; } @Override - public URL[] getDocs() { - return null; + public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { + + manager.registerGauge(this, registry, () -> name, true, "searcherName", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> cachingEnabled, true, "caching", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> openTime, true, "openedAt", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> warmupTime, true, "warmupTime", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> registerTime, true, "registeredAt", Category.SEARCHER.toString(), scope); + // reader stats + manager.registerGauge(this, registry, () -> reader.numDocs(), true, "numDocs", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> reader.maxDoc(), true, "maxDoc", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> reader.maxDoc() - reader.numDocs(), true, "deletedDocs", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> reader.toString(), true, "reader", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> reader.directory().toString(), true, "readerDir", Category.SEARCHER.toString(), scope); + manager.registerGauge(this, registry, () -> 
reader.getVersion(), true, "indexVersion", Category.SEARCHER.toString(), scope); + } @Override - public NamedList getStatistics() { - final NamedList lst = new SimpleOrderedMap<>(); - lst.add("searcherName", name); - lst.add("caching", cachingEnabled); - - lst.addAll(readerStats); - - lst.add("openedAt", openTime); - if (registerTime != null) lst.add("registeredAt", registerTime); - lst.add("warmupTime", warmupTime); - return lst; - } - - static private NamedList snapStatistics(DirectoryReader reader) { - final NamedList lst = new SimpleOrderedMap<>(); - lst.add("numDocs", reader.numDocs()); - lst.add("maxDoc", reader.maxDoc()); - lst.add("deletedDocs", reader.maxDoc() - reader.numDocs()); - lst.add("reader", reader.toString()); - lst.add("readerDir", reader.directory()); - lst.add("indexVersion", reader.getVersion()); - return lst; + public MetricRegistry getMetricRegistry() { + return core.getMetricRegistry(); } private static class FilterImpl extends Filter { diff --git a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java index c2b8a5d4118d..b802c41d7b37 100644 --- a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java +++ b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java @@ -58,9 +58,11 @@ import org.apache.solr.search.facet.MaxAgg; import org.apache.solr.search.facet.MinAgg; import org.apache.solr.search.facet.PercentileAgg; +import org.apache.solr.search.facet.StddevAgg; import org.apache.solr.search.facet.SumAgg; import org.apache.solr.search.facet.SumsqAgg; import org.apache.solr.search.facet.UniqueAgg; +import org.apache.solr.search.facet.VarianceAgg; import org.apache.solr.search.function.CollapseScoreFunction; import org.apache.solr.search.function.OrdFieldSource; import org.apache.solr.search.function.ReverseOrdFieldSource; @@ -931,14 +933,21 @@ public ValueSource parse(FunctionQParser fp) throws SyntaxError { } }); - /*** - addParser("agg_stdev", new ValueSourceParser() { - @Override - public ValueSource parse(FunctionQParser fp) throws SyntaxError { - return null; - } + addParser("agg_variance", new ValueSourceParser() { + @Override + public ValueSource parse(FunctionQParser fp) throws SyntaxError { + return new VarianceAgg(fp.parseValueSource()); + } }); - + + addParser("agg_stddev", new ValueSourceParser() { + @Override + public ValueSource parse(FunctionQParser fp) throws SyntaxError { + return new StddevAgg(fp.parseValueSource()); + } + }); + + /*** addParser("agg_multistat", new ValueSourceParser() { @Override public ValueSource parse(FunctionQParser fp) throws SyntaxError { diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java index 9ec5d79430dc..4f57bcd90c06 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java +++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java @@ -82,7 +82,7 @@ protected void merge(SimpleOrderedMap facetResult, Context mcontext) { Object nb = facetResult.get("numBuckets"); if (nb != null) { if (numBuckets == null) { - numBuckets = new FacetNumBucketsMerger(); + numBuckets = new HLLAgg("hll_merger").createFacetMerger(nb); } numBuckets.merge(nb , mcontext); } @@ -98,17 +98,7 @@ public Object getMergedResult() { SimpleOrderedMap result = new SimpleOrderedMap(); if (numBuckets != null) { - int removed = 0; - if (freq.mincount > 1) { - for (FacetBucket bucket : buckets.values()) { - if 
(bucket.count < freq.mincount) removed++; - } - } - result.add("numBuckets", ((Number)numBuckets.getMergedResult()).longValue() - removed); - - // TODO: we can further increase this estimate. - // If not sorting by count, use a simple ratio to scale - // If sorting by count desc, then add up the highest_possible_missing_count from each shard + result.add("numBuckets", ((Number)numBuckets.getMergedResult()).longValue()); } sortBuckets(); diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java index d4daf08d80ae..143d1fce0268 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java +++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java @@ -210,10 +210,6 @@ SimpleOrderedMap findTopSlots(final int numSlots, final int slotCardinal IntFunction bucketValFromSlotNumFunc, Function fieldQueryValFunc) throws IOException { int numBuckets = 0; - List bucketVals = null; - if (freq.numBuckets && fcontext.isShard()) { - bucketVals = new ArrayList<>(100); - } final int off = fcontext.isShard() ? 0 : (int) freq.offset; @@ -221,9 +217,11 @@ SimpleOrderedMap findTopSlots(final int numSlots, final int slotCardinal if (freq.limit >= 0) { effectiveLimit = freq.limit; if (fcontext.isShard()) { - // add over-request if this is a shard request if (freq.overrequest == -1) { - effectiveLimit = (long) (effectiveLimit*1.1+4); // default: add 10% plus 4 (to overrequest for very small limits) + // add over-request if this is a shard request and if we have a small offset (large offsets will already be gathering many more buckets than needed) + if (freq.offset < 10) { + effectiveLimit = (long) (effectiveLimit * 1.1 + 4); // default: add 10% plus 4 (to overrequest for very small limits) + } } else { effectiveLimit += freq.overrequest; } @@ -233,7 +231,7 @@ SimpleOrderedMap findTopSlots(final int numSlots, final int slotCardinal final int sortMul = freq.sortDirection.getMultiplier(); - int maxTopVals = (int) (effectiveLimit >= 0 ? Math.min(off + effectiveLimit, Integer.MAX_VALUE - 1) : Integer.MAX_VALUE - 1); + int maxTopVals = (int) (effectiveLimit >= 0 ? Math.min(freq.offset + effectiveLimit, Integer.MAX_VALUE - 1) : Integer.MAX_VALUE - 1); maxTopVals = Math.min(maxTopVals, slotCardinality); final SlotAcc sortAcc = this.sortAcc, indexOrderAcc = this.indexOrderAcc; final BiPredicate orderPredicate; @@ -257,16 +255,18 @@ SimpleOrderedMap findTopSlots(final int numSlots, final int slotCardinal Slot bottom = null; Slot scratchSlot = new Slot(); for (int slotNum = 0; slotNum < numSlots; slotNum++) { - // screen out buckets not matching mincount immediately (i.e. don't even increment numBuckets) - if (effectiveMincount > 0 && countAcc.getCount(slotNum) < effectiveMincount) { - continue; + + // screen out buckets not matching mincount + if (effectiveMincount > 0) { + int count = countAcc.getCount(slotNum); + if (count < effectiveMincount) { + if (count > 0) + numBuckets++; // Still increment numBuckets as long as we have some count. This is for consistency between distrib and non-distrib mode. 
+ continue; + } } numBuckets++; - if (bucketVals != null && bucketVals.size()<100) { - Object val = bucketValFromSlotNumFunc.apply(slotNum); - bucketVals.add(val); - } if (bottom != null) { scratchSlot.slot = slotNum; // scratchSlot is only used to hold this slotNum for the following line @@ -292,10 +292,7 @@ SimpleOrderedMap findTopSlots(final int numSlots, final int slotCardinal if (!fcontext.isShard()) { res.add("numBuckets", numBuckets); } else { - SimpleOrderedMap map = new SimpleOrderedMap<>(2); - map.add("numBuckets", numBuckets); - map.add("vals", bucketVals); - res.add("numBuckets", map); + calculateNumBuckets(res); } } @@ -320,7 +317,7 @@ SimpleOrderedMap findTopSlots(final int numSlots, final int slotCardinal // if we are deep paging, we don't have to order the highest "offset" counts. int collectCount = Math.max(0, queue.size() - off); - assert collectCount <= effectiveLimit; + assert collectCount <= maxTopVals; int[] sortedSlots = new int[collectCount]; for (int i = collectCount - 1; i >= 0; i--) { sortedSlots[i] = queue.pop().slot; @@ -346,6 +343,20 @@ SimpleOrderedMap findTopSlots(final int numSlots, final int slotCardinal return res; } + private void calculateNumBuckets(SimpleOrderedMap target) throws IOException { + DocSet domain = fcontext.base; + if (freq.prefix != null) { + Query prefixFilter = sf.getType().getPrefixQuery(null, sf, freq.prefix); + domain = fcontext.searcher.getDocSet(prefixFilter, domain); + } + + HLLAgg agg = new HLLAgg(freq.field); + SlotAcc acc = agg.createSlotAcc(fcontext, domain.size(), 1); + acc.collect(domain, 0); + acc.key = "numBuckets"; + acc.setValues(target, 0); + } + private static class Slot { int slot; } @@ -522,7 +533,6 @@ public void resize(Resizer resizer) { "all", {"cat3":{"_l":["A"]}}]]}, "cat1":{"_l":["A"]}}} - */ static List asList(Object list) { @@ -578,6 +588,10 @@ protected SimpleOrderedMap refineFacets() throws IOException { } } + if (freq.numBuckets && !skipThisFacet) { + calculateNumBuckets(res); + } + // If there are just a couple of leaves, and if the domain is large, then // going by term is likely the most efficient? // If the domain is small, or if the number of leaves is large, then doing diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java index c19d55dbbae5..228678b7c837 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java +++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessorByArray.java @@ -66,7 +66,7 @@ private SimpleOrderedMap calcFacets() throws IOException { refineResult = refineFacets(); // if we've seen this facet bucket, then refining can be done. If we haven't, we still // only need to continue if we need allBuckets or numBuckets info. 
- if (skipThisFacet || (!freq.allBuckets && !freq.numBuckets)) return refineResult; + if (skipThisFacet || !freq.allBuckets) return refineResult; } String prefix = freq.prefix; diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java b/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java index bf1379162ef5..3407ae41c1b2 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java +++ b/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java @@ -319,12 +319,6 @@ public String getDescription() { public Category getCategory() { return Category.QUERY; } - - @Override - public String getSource() { - return null; - } - } diff --git a/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java b/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java index 3da3541b7ed1..1d8aecb2e5f4 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java +++ b/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java @@ -33,7 +33,7 @@ import java.util.List; /** - * Accumulates statistics separated by a slot number. + * Accumulates statistics separated by a slot number. * There is a separate statistic per slot. The slot is usually an ordinal into a set of values, e.g. tracking a count * frequency per term. * Sometimes there doesn't need to be a slot distinction, in which case there is just one nominal slot. @@ -46,8 +46,7 @@ public SlotAcc(FacetContext fcontext) { this.fcontext = fcontext; } - public void setNextReader(LeafReaderContext readerContext) throws IOException { - } + public void setNextReader(LeafReaderContext readerContext) throws IOException {} public abstract void collect(int doc, int slot) throws IOException; @@ -61,7 +60,7 @@ public int collect(DocSet docs, int slot) throws IOException { int segBase = 0; int segMax; int adjustedMax = 0; - for (DocIterator docsIt = docs.iterator(); docsIt.hasNext(); ) { + for (DocIterator docsIt = docs.iterator(); docsIt.hasNext();) { final int doc = docsIt.nextDoc(); if (doc >= adjustedMax) { do { @@ -78,12 +77,11 @@ public int collect(DocSet docs, int slot) throws IOException { setNextReader(ctx); } count++; - collect(doc - segBase, slot); // per-seg collectors + collect(doc - segBase, slot); // per-seg collectors } return count; } - public abstract int compare(int slotA, int slotB); public abstract Object getValue(int slotNum) throws IOException; @@ -101,8 +99,7 @@ public void setValues(SimpleOrderedMap bucket, int slotNum) throws IOExc public abstract void resize(Resizer resizer); @Override - public void close() throws IOException { - } + public void close() throws IOException {} public static abstract class Resizer { public abstract int getNewSize(); @@ -181,15 +178,14 @@ public void setNextReader(LeafReaderContext readerContext) throws IOException { } } - -// have a version that counts the number of times a Slot has been hit? (for avg... what else?) +// have a version that counts the number of times a Slot has been hit? (for avg... what else?) // TODO: make more sense to have func as the base class rather than double? 
// double-slot-func -> func-slot -> slot -> acc // double-slot-func -> double-slot -> slot -> acc abstract class DoubleFuncSlotAcc extends FuncSlotAcc { - double[] result; // TODO: use DoubleArray + double[] result; // TODO: use DoubleArray double initialValue; public DoubleFuncSlotAcc(ValueSource values, FacetContext fcontext, int numSlots) { @@ -210,7 +206,6 @@ public int compare(int slotA, int slotB) { return Double.compare(result[slotA], result[slotB]); } - @Override public Object getValue(int slot) { return result[slot]; @@ -228,7 +223,7 @@ public void resize(Resizer resizer) { } abstract class IntSlotAcc extends SlotAcc { - int[] result; // use LongArray32 + int[] result; // use LongArray32 int initialValue; public IntSlotAcc(FacetContext fcontext, int numSlots, int initialValue) { @@ -261,15 +256,13 @@ public void resize(Resizer resizer) { } } - - class SumSlotAcc extends DoubleFuncSlotAcc { public SumSlotAcc(ValueSource values, FacetContext fcontext, int numSlots) { super(values, fcontext, numSlots); } public void collect(int doc, int slotNum) throws IOException { - double val = values.doubleVal(doc); // todo: worth trying to share this value across multiple stats that need it? + double val = values.doubleVal(doc); // todo: worth trying to share this value across multiple stats that need it? result[slotNum] += val; } } @@ -287,8 +280,6 @@ public void collect(int doc, int slotNum) throws IOException { } } - - class MinSlotAcc extends DoubleFuncSlotAcc { public MinSlotAcc(ValueSource values, FacetContext fcontext, int numSlots) { super(values, fcontext, numSlots, Double.NaN); @@ -297,10 +288,10 @@ public MinSlotAcc(ValueSource values, FacetContext fcontext, int numSlots) { @Override public void collect(int doc, int slotNum) throws IOException { double val = values.doubleVal(doc); - if (val == 0 && !values.exists(doc)) return; // depend on fact that non existing values return 0 for func query + if (val == 0 && !values.exists(doc)) return; // depend on fact that non existing values return 0 for func query double currMin = result[slotNum]; - if (!(val >= currMin)) { // val>=currMin will be false for staring value: val>=NaN + if (!(val >= currMin)) { // val>=currMin will be false for staring value: val>=NaN result[slotNum] = val; } } @@ -314,17 +305,16 @@ public MaxSlotAcc(ValueSource values, FacetContext fcontext, int numSlots) { @Override public void collect(int doc, int slotNum) throws IOException { double val = values.doubleVal(doc); - if (val == 0 && !values.exists(doc)) return; // depend on fact that non existing values return 0 for func query + if (val == 0 && !values.exists(doc)) return; // depend on fact that non existing values return 0 for func query double currMax = result[slotNum]; - if (!(val <= currMax)) { // reversed order to handle NaN + if (!(val <= currMax)) { // reversed order to handle NaN result[slotNum] = val; } } } - class AvgSlotAcc extends DoubleFuncSlotAcc { int[] counts; @@ -336,7 +326,7 @@ public AvgSlotAcc(ValueSource values, FacetContext fcontext, int numSlots) { @Override public void reset() { super.reset(); - for (int i=0; i numberList = (List)facetResult; + this.count += numberList.get(0).longValue(); + this.sumSq += numberList.get(1).doubleValue(); + this.sum += numberList.get(2).doubleValue(); + } + + @Override + public Object getMergedResult() { + return this.getDouble(); + } + + @Override + protected double getDouble() { + double val = count == 0 ? 
0.0d : Math.sqrt((sumSq/count)-Math.pow(sum/count, 2)); + return val; + } + }; +} diff --git a/solr/core/src/java/org/apache/solr/search/facet/VarianceAgg.java b/solr/core/src/java/org/apache/solr/search/facet/VarianceAgg.java new file mode 100644 index 000000000000..ec6955f46634 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/search/facet/VarianceAgg.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.search.facet; + +import java.io.IOException; +import java.util.List; + +import org.apache.lucene.queries.function.ValueSource; + + +public class VarianceAgg extends SimpleAggValueSource { + public VarianceAgg(ValueSource vs) { + super("variance", vs); + } + + @Override + public SlotAcc createSlotAcc(FacetContext fcontext, int numDocs, int numSlots) throws IOException { + return new VarianceSlotAcc(getArg(), fcontext, numSlots); + } + + @Override + public FacetMerger createFacetMerger(Object prototype) { + return new Merger(); + } + + private static class Merger extends FacetDoubleMerger { + long count; + double sumSq; + double sum; + + @Override + @SuppressWarnings("unchecked") + public void merge(Object facetResult, Context mcontext1) { + List numberList = (List)facetResult; + this.count += numberList.get(0).longValue(); + this.sumSq += numberList.get(1).doubleValue(); + this.sum += numberList.get(2).doubleValue(); + } + + @Override + public Object getMergedResult() { + return this.getDouble(); + } + + @Override + protected double getDouble() { + double val = count == 0 ? 
0.0d : (sumSq/count)-Math.pow(sum/count, 2); + return val; + } + }; +} diff --git a/solr/core/src/java/org/apache/solr/search/grouping/Command.java b/solr/core/src/java/org/apache/solr/search/grouping/Command.java index 1bba7fc23788..0973d91580c5 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/Command.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/Command.java @@ -53,23 +53,11 @@ public interface Command { */ String getKey(); - /** - * @return The group sort (overall sort) - */ - Sort getGroupSort(); - - /** - * @return The sort inside a group - */ - Sort getWithinGroupSort(); - - /** * @return The group SortSpec (overall sort) */ SortSpec getGroupSortSpec(); - /** * @return The SortSpec inside a group */ diff --git a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java index 8ba0a6a45af1..5b2609c0215a 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java @@ -29,11 +29,12 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; import org.apache.lucene.search.TimeLimitingCollector; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.grouping.AllGroupHeadsCollector; -import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector; +import org.apache.lucene.search.grouping.TermGroupSelector; +import org.apache.lucene.search.grouping.ValueSourceGroupSelector; import org.apache.solr.common.util.NamedList; import org.apache.solr.schema.FieldType; import org.apache.solr.schema.SchemaField; @@ -172,11 +173,14 @@ private DocSet computeGroupedDocSet(Query query, ProcessedFilter filter, List()), + sort); } else { - allGroupHeadsCollector = TermAllGroupHeadsCollector.create(firstCommand.getKey(), firstCommand.getWithinGroupSort()); + allGroupHeadsCollector + = AllGroupHeadsCollector.newCollector(new TermGroupSelector(firstCommand.getKey()), sort); } if (collectors.isEmpty()) { searchWithTimeLimiter(query, filter, allGroupHeadsCollector); diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/QueryCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/QueryCommand.java index a98533f36bdd..7d2eba2154e7 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/QueryCommand.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/QueryCommand.java @@ -37,15 +37,15 @@ public class QueryCommand implements Command { public static class Builder { - private Sort sort; + private SortSpec sortSpec; private String queryString; private Query query; private DocSet docSet; private Integer docsToCollect; private boolean needScores; - public Builder setSort(Sort sort) { - this.sort = sort; + public Builder setSortSpec(SortSpec sortSpec) { + this.sortSpec = sortSpec; return this; } @@ -95,16 +95,16 @@ public Builder setNeedScores(boolean needScores) { } public QueryCommand build() { - if (sort == null || query == null || docSet == null || docsToCollect == null) { + if (sortSpec == null || sortSpec.getSort() == null || query == null || docSet == null || docsToCollect == null) { throw new IllegalStateException("All fields must be set"); } - return 
new QueryCommand(sort, query, docsToCollect, needScores, docSet, queryString); + return new QueryCommand(sortSpec, query, docsToCollect, needScores, docSet, queryString); } } - private final Sort sort; + private final SortSpec sortSpec; private final Query query; private final DocSet docSet; private final int docsToCollect; @@ -114,8 +114,8 @@ public QueryCommand build() { private TopDocsCollector collector; private FilterCollector filterCollector; - private QueryCommand(Sort sort, Query query, int docsToCollect, boolean needScores, DocSet docSet, String queryString) { - this.sort = sort; + private QueryCommand(SortSpec sortSpec, Query query, int docsToCollect, boolean needScores, DocSet docSet, String queryString) { + this.sortSpec = sortSpec; this.query = query; this.docsToCollect = docsToCollect; this.needScores = needScores; @@ -125,6 +125,7 @@ private QueryCommand(Sort sort, Query query, int docsToCollect, boolean needScor @Override public List create() throws IOException { + final Sort sort = (sortSpec == null ? null : sortSpec.getSort()); if (sort == null || sort.equals(Sort.RELEVANCE)) { collector = TopScoreDocCollector.create(docsToCollect); } else { @@ -144,24 +145,14 @@ public String getKey() { return queryString != null ? queryString : query.toString(); } - @Override - public Sort getGroupSort() { - return sort; - } - - @Override - public Sort getWithinGroupSort() { - return null; - } - @Override public SortSpec getGroupSortSpec() { - return null; + return sortSpec; } @Override public SortSpec getWithinGroupSortSpec() { - return null; + return null; } } diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java index 1021a34c04b4..c02eb22e4898 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java @@ -16,25 +16,27 @@ */ package org.apache.solr.search.grouping.distributed.command; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; + import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.Collector; import org.apache.lucene.search.Sort; import org.apache.lucene.search.grouping.AllGroupsCollector; import org.apache.lucene.search.grouping.FirstPassGroupingCollector; import org.apache.lucene.search.grouping.SearchGroup; -import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector; -import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector; -import org.apache.lucene.search.grouping.term.TermAllGroupsCollector; -import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector; +import org.apache.lucene.search.grouping.TermGroupSelector; +import org.apache.lucene.search.grouping.ValueSourceGroupSelector; import org.apache.lucene.util.BytesRef; import org.apache.solr.schema.FieldType; import org.apache.solr.schema.SchemaField; import org.apache.solr.search.SortSpec; import org.apache.solr.search.grouping.Command; -import java.io.IOException; -import java.util.*; - /** * Creates all the collectors needed for the first phase and how to handle the results. 
*/ @@ -68,9 +70,9 @@ public Builder setIncludeGroupCount(boolean includeGroupCount) { } public SearchGroupsFieldCommand build() { - if (field == null || groupSortSpec == null || groupSortSpec.getSort() == null || topNGroups == null) { - throw new IllegalStateException("All fields must be set"); - } + if (field == null || groupSortSpec == null || groupSortSpec.getSort() == null || topNGroups == null) { + throw new IllegalStateException("All fields must be set"); + } return new SearchGroupsFieldCommand(field, groupSortSpec, topNGroups, includeGroupCount); } @@ -102,18 +104,20 @@ public List create() throws IOException { if (topNGroups > 0) { if (fieldType.getNumberType() != null) { ValueSource vs = fieldType.getValueSource(field, null); - firstPassGroupingCollector = new FunctionFirstPassGroupingCollector(vs, new HashMap(), groupSort, topNGroups); + firstPassGroupingCollector + = new FirstPassGroupingCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>()), groupSort, topNGroups); } else { - firstPassGroupingCollector = new TermFirstPassGroupingCollector(field.getName(), groupSort, topNGroups); + firstPassGroupingCollector + = new FirstPassGroupingCollector<>(new TermGroupSelector(field.getName()), groupSort, topNGroups); } collectors.add(firstPassGroupingCollector); } if (includeGroupCount) { if (fieldType.getNumberType() != null) { ValueSource vs = fieldType.getValueSource(field, null); - allGroupsCollector = new FunctionAllGroupsCollector(vs, new HashMap()); + allGroupsCollector = new AllGroupsCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>())); } else { - allGroupsCollector = new TermAllGroupsCollector(field.getName()); + allGroupsCollector = new AllGroupsCollector<>(new TermGroupSelector(field.getName())); } collectors.add(allGroupsCollector); } @@ -142,26 +146,15 @@ public SearchGroupsFieldCommandResult result() throws IOException { } @Override - public Sort getWithinGroupSort() { + public SortSpec getWithinGroupSortSpec() { return null; } - @Override - public Sort getGroupSort() { - return groupSortSpec.getSort(); - } - - @Override public SortSpec getGroupSortSpec() { return groupSortSpec; } - @Override - public SortSpec getWithinGroupSortSpec() { - return null; - } - @Override public String getKey() { return field.getName(); diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java index 5d6933a730fa..b41faa83ac69 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java @@ -16,15 +16,22 @@ */ package org.apache.solr.search.grouping.distributed.command; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; + import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.Collector; import org.apache.lucene.search.Sort; -import org.apache.lucene.search.grouping.SecondPassGroupingCollector; import org.apache.lucene.search.grouping.GroupDocs; import org.apache.lucene.search.grouping.SearchGroup; +import org.apache.lucene.search.grouping.TermGroupSelector; import org.apache.lucene.search.grouping.TopGroups; -import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector; -import 
org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector; +import org.apache.lucene.search.grouping.TopGroupsCollector; +import org.apache.lucene.search.grouping.ValueSourceGroupSelector; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.mutable.MutableValue; import org.apache.solr.schema.FieldType; @@ -32,13 +39,6 @@ import org.apache.solr.search.SortSpec; import org.apache.solr.search.grouping.Command; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; - /** * Defines all collectors for retrieving the second phase and how to handle the collector result. */ @@ -60,13 +60,13 @@ public Builder setField(SchemaField field) { } public Builder setGroupSortSpec(SortSpec groupSortSpec) { - this.groupSortSpec = groupSortSpec; - return this; + this.groupSortSpec = groupSortSpec; + return this; } public Builder setWithinGroupSortSpec(SortSpec withinGroupSortSpec) { - this.withinGroupSortSpec = withinGroupSortSpec; - return this; + this.withinGroupSortSpec = withinGroupSortSpec; + return this; } public Builder setFirstPhaseGroups(Collection> firstPhaseGroups) { @@ -90,7 +90,7 @@ public Builder setNeedMaxScore(Boolean needMaxScore) { } public TopGroupsFieldCommand build() { - if (field == null || groupSortSpec == null || groupSortSpec.getSort() == null || withinGroupSortSpec == null || withinGroupSortSpec.getSort() == null || firstPhaseGroups == null || + if (field == null || groupSortSpec == null || groupSortSpec.getSort() == null || withinGroupSortSpec == null || withinGroupSortSpec.getSort() == null || firstPhaseGroups == null || maxDocPerGroup == null) { throw new IllegalStateException("All required fields must be set"); } @@ -107,7 +107,7 @@ public TopGroupsFieldCommand build() { private final int maxDocPerGroup; private final boolean needScores; private final boolean needMaxScore; - private SecondPassGroupingCollector secondPassCollector; + private TopGroupsCollector secondPassCollector; private TopGroupsFieldCommand(SchemaField field, SortSpec groupSortSpec, @@ -139,12 +139,12 @@ public List create() throws IOException { if (fieldType.getNumberType() != null) { ValueSource vs = fieldType.getValueSource(field, null); Collection> v = GroupConverter.toMutable(field, firstPhaseGroups); - secondPassCollector = new FunctionSecondPassGroupingCollector( - v, groupSort, withinGroupSort, maxDocPerGroup, needScores, needMaxScore, true, vs, new HashMap() + secondPassCollector = new TopGroupsCollector<>(new ValueSourceGroupSelector(vs, new HashMap<>()), + v, groupSort, withinGroupSort, maxDocPerGroup, needScores, needMaxScore, true ); } else { - secondPassCollector = new TermSecondPassGroupingCollector( - field.getName(), firstPhaseGroups, groupSort, withinGroupSort, maxDocPerGroup, needScores, needMaxScore, true + secondPassCollector = new TopGroupsCollector<>(new TermGroupSelector(field.getName()), + firstPhaseGroups, groupSort, withinGroupSort, maxDocPerGroup, needScores, needMaxScore, true ); } collectors.add(secondPassCollector); @@ -171,16 +171,6 @@ public String getKey() { return field.getName(); } - @Override - public Sort getGroupSort() { - return groupSortSpec.getSort(); - } - - @Override - public Sort getWithinGroupSort() { - return withinGroupSortSpec.getSort(); - } - @Override public SortSpec getGroupSortSpec() { return groupSortSpec; @@ -188,7 +178,7 @@ public SortSpec getGroupSortSpec() { @Override public SortSpec getWithinGroupSortSpec() { - 
return withinGroupSortSpec; + return withinGroupSortSpec; } } diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java index 4de9b27da972..15a0a04ef6d2 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java @@ -61,8 +61,9 @@ public void process(ResponseBuilder rb, ShardRequest shardRequest) { String[] fields = rb.getGroupingSpec().getFields(); String[] queries = rb.getGroupingSpec().getQueries(); SortSpec withinGroupSortSpec = rb.getGroupingSpec().getWithinGroupSortSpec(); - Sort withinGroupSort = withinGroupSortSpec.getSort(); assert withinGroupSortSpec != null; + Sort withinGroupSort = withinGroupSortSpec.getSort(); + assert withinGroupSort != null; // If group.format=simple group.offset doesn't make sense int groupOffsetDefault; diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java index 3310141dc393..9d916f9e11cf 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java @@ -83,7 +83,6 @@ public Map transformToNative(NamedList groupSchemaFields = groupSortSpec.getSchemaFields(); final SortField[] groupSortFields = groupSortSpec.getSort().getSort(); - assert (groupSchemaFields.size() == groupSortFields.length); for (Map.Entry command : shardResponse) { diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java index fee7fc4e95ee..be4d5270f407 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java @@ -93,7 +93,7 @@ public NamedList transform(List data) throws IOException { * {@inheritDoc} */ @Override - public Map transformToNative(NamedList shardResponse, SortSpec groupSortSpec, SortSpec withinGroupSortSpec, String shard) { + public Map transformToNative(NamedList shardResponse, SortSpec groupSortSpec, SortSpec withinGroupSortSpec, String shard) { Map result = new HashMap<>(); final IndexSchema schema = rb.req.getSearcher().getSchema(); @@ -237,12 +237,11 @@ protected NamedList serializeTopGroups(TopGroups data, SchemaField gro } FieldDoc fieldDoc = (FieldDoc) searchGroup.scoreDocs[i]; - - assert (withinGroupSchemaFields.size() == fieldDoc.fields.length); // JTODO (?) 
+ assert (withinGroupSchemaFields.size() == fieldDoc.fields.length); Object[] convertedSortValues = new Object[fieldDoc.fields.length]; for (int j = 0; j < fieldDoc.fields.length; j++) { - convertedSortValues[j] = ShardResultTransformerUtils.marshalSortValue(fieldDoc.fields[j], withinGroupSchemaFields.get(j)); + convertedSortValues[j] = ShardResultTransformerUtils.marshalSortValue(fieldDoc.fields[j], withinGroupSchemaFields.get(j)); } document.add("sortValues", convertedSortValues); } diff --git a/solr/core/src/java/org/apache/solr/search/join/BlockJoinFacetComponentSupport.java b/solr/core/src/java/org/apache/solr/search/join/BlockJoinFacetComponentSupport.java index 85aa7996ed48..560e441a9237 100644 --- a/solr/core/src/java/org/apache/solr/search/join/BlockJoinFacetComponentSupport.java +++ b/solr/core/src/java/org/apache/solr/search/join/BlockJoinFacetComponentSupport.java @@ -124,11 +124,13 @@ private NamedList getNamedListFromList(NamedList parentList, String name, boolea @Override public void handleResponses(ResponseBuilder rb, ShardRequest sreq) { - NamedList collectedChildFacetFields = getChildFacetFields(rb.rsp.getValues(), true); - List responses = sreq.responses; - for (ShardResponse shardResponse : responses) { - NamedList shardChildFacetFields = getChildFacetFields(shardResponse.getSolrResponse().getResponse(), false); - mergeFacets(collectedChildFacetFields, shardChildFacetFields); + if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0) { + NamedList collectedChildFacetFields = getChildFacetFields(rb.rsp.getValues(), true); + List responses = sreq.responses; + for (ShardResponse shardResponse : responses) { + NamedList shardChildFacetFields = getChildFacetFields(shardResponse.getSolrResponse().getResponse(), false); + mergeFacets(collectedChildFacetFields, shardChildFacetFields); + } } } diff --git a/solr/core/src/java/org/apache/solr/search/join/ScoreJoinQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/join/ScoreJoinQParserPlugin.java index 999cd648ad75..a49195cdea44 100644 --- a/solr/core/src/java/org/apache/solr/search/join/ScoreJoinQParserPlugin.java +++ b/solr/core/src/java/org/apache/solr/search/join/ScoreJoinQParserPlugin.java @@ -21,8 +21,9 @@ import java.util.Objects; import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.JoinUtil; import org.apache.lucene.search.join.ScoreMode; import org.apache.solr.cloud.ZkController; @@ -86,10 +87,10 @@ public OtherCoreJoinQuery(Query fromQuery, String fromField, } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { SolrRequestInfo info = SolrRequestInfo.getRequestInfo(); - CoreContainer container = info.getReq().getCore().getCoreDescriptor().getCoreContainer(); + CoreContainer container = info.getReq().getCore().getCoreContainer(); final SolrCore fromCore = container.getCore(fromIndex); @@ -106,7 +107,7 @@ public Query rewrite(IndexReader reader) throws IOException { fromCore.close(); fromHolder.decref(); } - return joinQuery.rewrite(reader); + return joinQuery.rewrite(searcher.getIndexReader()).createWeight(searcher, needsScores, boost); } @Override @@ -156,11 +157,11 @@ static class SameCoreJoinQuery extends Query { } @Override - public Query rewrite(IndexReader reader) throws 
IOException { + public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { SolrRequestInfo info = SolrRequestInfo.getRequestInfo(); final Query jq = JoinUtil.createJoinQuery(fromField, true, toField, fromQuery, info.getReq().getSearcher(), scoreMode); - return jq.rewrite(reader); + return jq.rewrite(searcher.getIndexReader()).createWeight(searcher, needsScores, boost); } @@ -222,7 +223,7 @@ private Query createQuery(final String fromField, final String fromQueryStr, final String myCore = req.getCore().getCoreDescriptor().getName(); if (fromIndex != null && (!fromIndex.equals(myCore) || byPassShortCircutCheck)) { - CoreContainer container = req.getCore().getCoreDescriptor().getCoreContainer(); + CoreContainer container = req.getCore().getCoreContainer(); final String coreName = getCoreName(fromIndex, container); final SolrCore fromCore = container.getCore(coreName); diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java index ff0db9b79909..39ccadc68bb3 100644 --- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java +++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java @@ -16,7 +16,6 @@ */ package org.apache.solr.servlet; -import javax.management.MBeanServer; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; import javax.servlet.ServletException; @@ -34,7 +33,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.lang.invoke.MethodHandles; -import java.lang.management.ManagementFactory; import java.nio.file.Path; import java.nio.file.Paths; import java.time.Instant; @@ -42,12 +40,12 @@ import java.util.Arrays; import java.util.Locale; import java.util.Properties; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.codahale.metrics.jvm.BufferPoolMetricSet; import com.codahale.metrics.jvm.ClassLoadingGaugeSet; import com.codahale.metrics.jvm.GarbageCollectorMetricSet; import com.codahale.metrics.jvm.MemoryUsageGaugeSet; @@ -66,9 +64,11 @@ import org.apache.solr.core.CoreContainer; import org.apache.solr.core.NodeConfig; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.core.SolrXmlConfig; +import org.apache.solr.metrics.AltBufferPoolMetricSet; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.metrics.OperatingSystemMetricSet; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.request.SolrRequestInfo; @@ -185,16 +185,24 @@ public void init(FilterConfig config) throws ServletException } private void setupJvmMetrics() { - MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer(); SolrMetricManager metricManager = cores.getMetricManager(); + final Set hiddenSysProps = cores.getConfig().getHiddenSysProps(); try { - String registry = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.jvm); - metricManager.registerAll(registry, new BufferPoolMetricSet(platformMBeanServer), true, "buffers"); + String registry = SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm); + metricManager.registerAll(registry, new AltBufferPoolMetricSet(), true, "buffers"); metricManager.registerAll(registry, new ClassLoadingGaugeSet(), true, "classes"); - 
metricManager.registerAll(registry, new OperatingSystemMetricSet(platformMBeanServer), true, "os"); + metricManager.registerAll(registry, new OperatingSystemMetricSet(), true, "os"); metricManager.registerAll(registry, new GarbageCollectorMetricSet(), true, "gc"); metricManager.registerAll(registry, new MemoryUsageGaugeSet(), true, "memory"); metricManager.registerAll(registry, new ThreadStatesGaugeSet(), true, "threads"); // todo should we use CachedThreadStatesGaugeSet instead? + MetricsMap sysprops = new MetricsMap((detailed, map) -> { + System.getProperties().forEach((k, v) -> { + if (!hiddenSysProps.contains(k)) { + map.put(String.valueOf(k), v); + } + }); + }); + metricManager.registerGauge(null, registry, sysprops, true, "properties", "system"); } catch (Exception e) { log.warn("Error registering JVM metrics", e); } diff --git a/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java b/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java index 3394de1c91da..12369f7e4c10 100644 --- a/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java +++ b/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java @@ -15,6 +15,8 @@ * limitations under the License. */ package org.apache.solr.spelling; +import static org.apache.solr.common.params.CommonParams.ID; + import java.lang.invoke.MethodHandles; import java.util.ArrayList; import java.util.Arrays; @@ -41,8 +43,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.solr.common.params.CommonParams.ID; - public class SpellCheckCollator { private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private int maxCollations = 1; @@ -132,6 +132,19 @@ public List collate(SpellingResult result, params.remove(DisMaxParams.BF); // Collate testing does not support Grouping (see SOLR-2577) params.remove(GroupParams.GROUP); + + // Collate testing does not support the Collapse QParser (See SOLR-8807) + params.remove("expand"); + String[] filters = params.getParams(CommonParams.FQ); + if (filters != null) { + List filtersToApply = new ArrayList<>(filters.length); + for (String fq : filters) { + if (!fq.startsWith("{!collapse")) { + filtersToApply.add(fq); + } + } + params.set("fq", filtersToApply.toArray(new String[filtersToApply.size()])); + } // creating a request here... make sure to close it! ResponseBuilder checkResponse = new ResponseBuilder( diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java b/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java index d3e34979b74d..b8b9bea11ab1 100644 --- a/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java +++ b/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java @@ -16,20 +16,23 @@ */ package org.apache.solr.store.blockcache; -import java.net.URL; +import java.util.HashSet; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; -import org.apache.solr.core.SolrInfoMBean; +import com.codahale.metrics.MetricRegistry; +import org.apache.solr.core.SolrInfoBean; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.search.SolrCacheBase; /** - * A {@link SolrInfoMBean} that provides metrics on block cache operations. + * A {@link SolrInfoBean} that provides metrics on block cache operations. 
* * @lucene.experimental */ -public class Metrics extends SolrCacheBase implements SolrInfoMBean { +public class Metrics extends SolrCacheBase implements SolrInfoBean, SolrMetricProducer { public AtomicLong blockCacheSize = new AtomicLong(0); @@ -50,66 +53,70 @@ public class Metrics extends SolrCacheBase implements SolrInfoMBean { public AtomicLong shardBuffercacheAllocate = new AtomicLong(0); public AtomicLong shardBuffercacheLost = new AtomicLong(0); + private MetricsMap metricsMap; + private MetricRegistry registry; + private Set metricNames = new HashSet<>(); private long previous = System.nanoTime(); - - public NamedList getStatistics() { - NamedList stats = new SimpleOrderedMap<>(21); // room for one method call before growing - - long now = System.nanoTime(); - long delta = Math.max(now - previous, 1); - double seconds = delta / 1000000000.0; - - long hits_total = blockCacheHit.get(); - long hits_delta = hits_total - blockCacheHit_last.get(); - blockCacheHit_last.set(hits_total); - - long miss_total = blockCacheMiss.get(); - long miss_delta = miss_total - blockCacheMiss_last.get(); - blockCacheMiss_last.set(miss_total); - - long evict_total = blockCacheEviction.get(); - long evict_delta = evict_total - blockCacheEviction_last.get(); - blockCacheEviction_last.set(evict_total); - - long storeFail_total = blockCacheStoreFail.get(); - long storeFail_delta = storeFail_total - blockCacheStoreFail_last.get(); - blockCacheStoreFail_last.set(storeFail_total); - - long lookups_delta = hits_delta + miss_delta; - long lookups_total = hits_total + miss_total; - - stats.add("size", blockCacheSize.get()); - stats.add("lookups", lookups_total); - stats.add("hits", hits_total); - stats.add("evictions", evict_total); - stats.add("storeFails", storeFail_total); - stats.add("hitratio_current", calcHitRatio(lookups_delta, hits_delta)); // hit ratio since the last call - stats.add("lookups_persec", getPerSecond(lookups_delta, seconds)); // lookups per second since the last call - stats.add("hits_persec", getPerSecond(hits_delta, seconds)); // hits per second since the last call - stats.add("evictions_persec", getPerSecond(evict_delta, seconds)); // evictions per second since the last call - stats.add("storeFails_persec", getPerSecond(storeFail_delta, seconds)); // evictions per second since the last call - stats.add("time_delta", seconds); // seconds since last call - - // TODO: these aren't really related to the BlockCache - stats.add("buffercache.allocations", getPerSecond(shardBuffercacheAllocate.getAndSet(0), seconds)); - stats.add("buffercache.lost", getPerSecond(shardBuffercacheLost.getAndSet(0), seconds)); - - previous = now; - - return stats; + @Override + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + metricsMap = new MetricsMap((detailed, map) -> { + long now = System.nanoTime(); + long delta = Math.max(now - previous, 1); + double seconds = delta / 1000000000.0; + + long hits_total = blockCacheHit.get(); + long hits_delta = hits_total - blockCacheHit_last.get(); + blockCacheHit_last.set(hits_total); + + long miss_total = blockCacheMiss.get(); + long miss_delta = miss_total - blockCacheMiss_last.get(); + blockCacheMiss_last.set(miss_total); + + long evict_total = blockCacheEviction.get(); + long evict_delta = evict_total - blockCacheEviction_last.get(); + blockCacheEviction_last.set(evict_total); + + long storeFail_total = blockCacheStoreFail.get(); + long storeFail_delta = storeFail_total - 
blockCacheStoreFail_last.get(); + blockCacheStoreFail_last.set(storeFail_total); + + long lookups_delta = hits_delta + miss_delta; + long lookups_total = hits_total + miss_total; + + map.put("size", blockCacheSize.get()); + map.put("lookups", lookups_total); + map.put("hits", hits_total); + map.put("evictions", evict_total); + map.put("storeFails", storeFail_total); + map.put("hitratio_current", calcHitRatio(lookups_delta, hits_delta)); // hit ratio since the last call + map.put("lookups_persec", getPerSecond(lookups_delta, seconds)); // lookups per second since the last call + map.put("hits_persec", getPerSecond(hits_delta, seconds)); // hits per second since the last call + map.put("evictions_persec", getPerSecond(evict_delta, seconds)); // evictions per second since the last call + map.put("storeFails_persec", getPerSecond(storeFail_delta, seconds)); // evictions per second since the last call + map.put("time_delta", seconds); // seconds since last call + + // TODO: these aren't really related to the BlockCache + map.put("buffercache.allocations", getPerSecond(shardBuffercacheAllocate.getAndSet(0), seconds)); + map.put("buffercache.lost", getPerSecond(shardBuffercacheLost.getAndSet(0), seconds)); + + previous = now; + + }); + manager.registerGauge(this, registryName, metricsMap, true, getName(), getCategory().toString(), scope); } private float getPerSecond(long value, double seconds) { return (float) (value / seconds); } - // SolrInfoMBean methods + // SolrInfoBean methods @Override public String getName() { - return "HdfsBlockCache"; + return "hdfsBlockCache"; } @Override @@ -118,12 +125,13 @@ public String getDescription() { } @Override - public String getSource() { - return null; + public Set getMetricNames() { + return metricNames; } @Override - public URL[] getDocs() { - return null; + public MetricRegistry getMetricRegistry() { + return registry; } + } diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java index ba7c7fd1393e..64e6356dcd84 100644 --- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java +++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLocalityReporter.java @@ -18,8 +18,8 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.Arrays; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -27,16 +27,18 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import com.codahale.metrics.MetricRegistry; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class HdfsLocalityReporter implements SolrInfoMBean { +public class HdfsLocalityReporter implements SolrInfoBean, SolrMetricProducer { public static final String LOCALITY_BYTES_TOTAL = "locality.bytes.total"; public static final String LOCALITY_BYTES_LOCAL = "locality.bytes.local"; public static final String LOCALITY_BYTES_RATIO = "locality.bytes.ratio"; @@ -49,6 +51,9 @@ public class 
HdfsLocalityReporter implements SolrInfoMBean { private String hostname; private final ConcurrentMap> cache; + private final Set metricNames = new HashSet<>(); + private MetricRegistry registry; + public HdfsLocalityReporter() { cache = new ConcurrentHashMap<>(); } @@ -66,11 +71,6 @@ public String getName() { return "hdfs-locality"; } - @Override - public String getVersion() { - return getClass().getPackage().getSpecificationVersion(); - } - @Override public String getDescription() { return "Provides metrics for HDFS data locality."; @@ -82,89 +82,71 @@ public Category getCategory() { } @Override - public String getSource() { - return null; + public Set getMetricNames() { + return metricNames; } @Override - public URL[] getDocs() { - return null; + public MetricRegistry getMetricRegistry() { + return registry; } /** * Provide statistics on HDFS block locality, both in terms of bytes and block counts. */ @Override - public NamedList getStatistics() { - long totalBytes = 0; - long localBytes = 0; - int totalCount = 0; - int localCount = 0; - - for (Iterator iterator = cache.keySet().iterator(); iterator.hasNext();) { - HdfsDirectory hdfsDirectory = iterator.next(); - - if (hdfsDirectory.isClosed()) { - iterator.remove(); - } else { - try { - refreshDirectory(hdfsDirectory); - Map blockMap = cache.get(hdfsDirectory); - - // For every block in every file in this directory, count it - for (BlockLocation[] locations : blockMap.values()) { - for (BlockLocation bl : locations) { - totalBytes += bl.getLength(); - totalCount++; - - if (Arrays.asList(bl.getHosts()).contains(hostname)) { - localBytes += bl.getLength(); - localCount++; + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + MetricsMap metricsMap = new MetricsMap((detailed, map) -> { + long totalBytes = 0; + long localBytes = 0; + int totalCount = 0; + int localCount = 0; + + for (Iterator iterator = cache.keySet().iterator(); iterator.hasNext();) { + HdfsDirectory hdfsDirectory = iterator.next(); + + if (hdfsDirectory.isClosed()) { + iterator.remove(); + } else { + try { + refreshDirectory(hdfsDirectory); + Map blockMap = cache.get(hdfsDirectory); + + // For every block in every file in this directory, count it + for (BlockLocation[] locations : blockMap.values()) { + for (BlockLocation bl : locations) { + totalBytes += bl.getLength(); + totalCount++; + + if (Arrays.asList(bl.getHosts()).contains(hostname)) { + localBytes += bl.getLength(); + localCount++; + } } } + } catch (IOException e) { + logger.warn("Could not retrieve locality information for {} due to exception: {}", + hdfsDirectory.getHdfsDirPath(), e); } - } catch (IOException e) { - logger.warn("Could not retrieve locality information for {} due to exception: {}", - hdfsDirectory.getHdfsDirPath(), e); } } - } - - return createStatistics(totalBytes, localBytes, totalCount, localCount); - } - - /** - * Generate a statistics object based on the given measurements for all files monitored by this reporter. 
- * - * @param totalBytes - * The total bytes used - * @param localBytes - * The amount of bytes found on local nodes - * @param totalCount - * The total block count - * @param localCount - * The amount of blocks found on local nodes - * @return HDFS block locality statistics - */ - private NamedList createStatistics(long totalBytes, long localBytes, int totalCount, int localCount) { - NamedList statistics = new SimpleOrderedMap(); - - statistics.add(LOCALITY_BYTES_TOTAL, totalBytes); - statistics.add(LOCALITY_BYTES_LOCAL, localBytes); - if (localBytes == 0) { - statistics.add(LOCALITY_BYTES_RATIO, 0); - } else { - statistics.add(LOCALITY_BYTES_RATIO, localBytes / (double) totalBytes); - } - statistics.add(LOCALITY_BLOCKS_TOTAL, totalCount); - statistics.add(LOCALITY_BLOCKS_LOCAL, localCount); - if (localCount == 0) { - statistics.add(LOCALITY_BLOCKS_RATIO, 0); - } else { - statistics.add(LOCALITY_BLOCKS_RATIO, localCount / (double) totalCount); - } - - return statistics; + map.put(LOCALITY_BYTES_TOTAL, totalBytes); + map.put(LOCALITY_BYTES_LOCAL, localBytes); + if (localBytes == 0) { + map.put(LOCALITY_BYTES_RATIO, 0); + } else { + map.put(LOCALITY_BYTES_RATIO, localBytes / (double) totalBytes); + } + map.put(LOCALITY_BLOCKS_TOTAL, totalCount); + map.put(LOCALITY_BLOCKS_LOCAL, localCount); + if (localCount == 0) { + map.put(LOCALITY_BLOCKS_RATIO, 0); + } else { + map.put(LOCALITY_BLOCKS_RATIO, localCount / (double) totalCount); + } + }); + manager.registerGauge(this, registryName, metricsMap, true, "hdfsLocality", getCategory().toString(), scope); } /** @@ -209,4 +191,5 @@ private void refreshDirectory(HdfsDirectory dir) throws IOException { } } } + } diff --git a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java index 0ba0b813e2b3..7006b4a290c5 100644 --- a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java +++ b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java @@ -19,11 +19,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Map; +import java.util.function.Function; import org.apache.lucene.document.BinaryDocValuesField; // javadocs import org.apache.lucene.document.NumericDocValuesField; // javadocs import org.apache.lucene.document.SortedDocValuesField; // javadocs -import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; // javadocs import org.apache.lucene.document.StringField; // javadocs import org.apache.lucene.index.BinaryDocValues; @@ -170,62 +170,44 @@ public static enum Type { * Fields with this type act as if they were indexed with * {@link SortedSetDocValuesField}. */ - SORTED_SET_DOUBLE, - /** - * Multi-valued Integer, (e.g. indexed with {@link org.apache.lucene.document.IntPoint}) - *

    - * Fields with this type act as if they were indexed with - {@link SortedNumericDocValuesField}. - */ - SORTED_INTEGER, - /** - * Multi-valued Float, (e.g. indexed with {@link org.apache.lucene.document.FloatPoint}) - * <p>
    - * Fields with this type act as if they were indexed with - {@link SortedNumericDocValuesField}. - */ - SORTED_FLOAT, - /** - * Multi-valued Long, (e.g. indexed with {@link org.apache.lucene.document.LongPoint}) - * <p>
    - * Fields with this type act as if they were indexed with - {@link SortedNumericDocValuesField}. - */ - SORTED_LONG, - /** - * Multi-valued Double, (e.g. indexed with {@link org.apache.lucene.document.DoublePoint}) - * <p>
    - * Fields with this type act as if they were indexed with - * {@link SortedNumericDocValuesField}. - */ - SORTED_DOUBLE + SORTED_SET_DOUBLE + } /** + * * Wraps a provided DirectoryReader. Note that for convenience, the returned reader * can be used normally (e.g. passed to {@link DirectoryReader#openIfChanged(DirectoryReader)}) * and so on. + * + * @param in input directory reader + * @param perSegmentMapper function to map a segment reader to a mapping of fields to their uninversion type + * @return a wrapped directory reader */ + public static DirectoryReader wrap(DirectoryReader in, final Function> perSegmentMapper) throws IOException { + return new UninvertingDirectoryReader(in, perSegmentMapper); + } + public static DirectoryReader wrap(DirectoryReader in, final Map mapping) throws IOException { - return new UninvertingDirectoryReader(in, mapping); + return UninvertingReader.wrap(in, (r) -> mapping); } static class UninvertingDirectoryReader extends FilterDirectoryReader { - final Map mapping; + final Function> mapper; - public UninvertingDirectoryReader(DirectoryReader in, final Map mapping) throws IOException { + public UninvertingDirectoryReader(DirectoryReader in, final Function> mapper) throws IOException { super(in, new FilterDirectoryReader.SubReaderWrapper() { @Override public LeafReader wrap(LeafReader reader) { - return new UninvertingReader(reader, mapping); + return new UninvertingReader(reader, mapper.apply(reader)); } }); - this.mapping = mapping; + this.mapper = mapper; } @Override protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { - return new UninvertingDirectoryReader(in, mapping); + return new UninvertingDirectoryReader(in, mapper); } // NOTE: delegating the cache helpers is wrong since this wrapper alters the @@ -244,7 +226,7 @@ public CacheHelper getReaderCacheHelper() { /** * Create a new UninvertingReader with the specified mapping *

    - * Expert: This should almost never be used. Use {@link #wrap(DirectoryReader, Map)} + * Expert: This should almost never be used. Use {@link #wrap(DirectoryReader, Function)} * instead. * * @lucene.internal @@ -293,12 +275,6 @@ public UninvertingReader(LeafReader in, Map mapping) { case SORTED_SET_DOUBLE: type = DocValuesType.SORTED_SET; break; - case SORTED_INTEGER: - case SORTED_FLOAT: - case SORTED_LONG: - case SORTED_DOUBLE: - type = DocValuesType.SORTED_NUMERIC; - break; default: throw new AssertionError(); } diff --git a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java index d0daebbb954b..bc2afa879c34 100644 --- a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java +++ b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java @@ -281,7 +281,7 @@ public void doRecovery(CoreContainer cc, CoreDescriptor cd) { Thread thread = new Thread() { @Override public void run() { - MDCLoggingContext.setCoreDescriptor(cd); + MDCLoggingContext.setCoreDescriptor(cc, cd); try { if (SKIP_AUTO_RECOVERY) { log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery"); diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java index fdc9d2223ecd..e7f6a7beb285 100644 --- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java +++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.net.URL; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -48,8 +47,6 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.cloud.DocCollection; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.core.SolrConfig.UpdateHandlerInfo; import org.apache.solr.core.SolrCore; import org.apache.solr.metrics.SolrMetricManager; @@ -124,7 +121,7 @@ public DirectUpdateHandler2(SolrCore core) { commitWithinSoftCommit = updateHandlerInfo.commitWithinSoftCommit; indexWriterCloseWaitsForMerges = updateHandlerInfo.indexWriterCloseWaitsForMerges; - ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = core.getCoreContainer().getZkController(); if (zkController != null) { DocCollection dc = zkController.getClusterState().getCollection(core.getCoreDescriptor().getCollectionName()); if (dc.getRealtimeReplicas() == 1) { @@ -161,25 +158,42 @@ public DirectUpdateHandler2(SolrCore core, UpdateHandler updateHandler) { } @Override - public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { - commitCommands = manager.meter(registry, "commits", getCategory().toString(), scope); - manager.registerGauge(registry, () -> commitTracker.getCommitCount(), true, "autoCommits", getCategory().toString(), scope); - manager.registerGauge(registry, () -> softCommitTracker.getCommitCount(), true, "softAutoCommits", getCategory().toString(), scope); - optimizeCommands = manager.meter(registry, "optimizes", getCategory().toString(), scope); - rollbackCommands = manager.meter(registry, "rollbacks", getCategory().toString(), scope); - splitCommands = manager.meter(registry, "splits", getCategory().toString(), scope); - mergeIndexesCommands = 
manager.meter(registry, "merges", getCategory().toString(), scope); - expungeDeleteCommands = manager.meter(registry, "expungeDeletes", getCategory().toString(), scope); - manager.registerGauge(registry, () -> numDocsPending.longValue(), true, "docsPending", getCategory().toString(), scope); - manager.registerGauge(registry, () -> addCommands.longValue(), true, "adds", getCategory().toString(), scope); - manager.registerGauge(registry, () -> deleteByIdCommands.longValue(), true, "deletesById", getCategory().toString(), scope); - manager.registerGauge(registry, () -> deleteByQueryCommands.longValue(), true, "deletesByQuery", getCategory().toString(), scope); - manager.registerGauge(registry, () -> numErrors.longValue(), true, "errors", getCategory().toString(), scope); - - addCommandsCumulative = manager.meter(registry, "cumulativeAdds", getCategory().toString(), scope); - deleteByIdCommandsCumulative = manager.meter(registry, "cumulativeDeletesById", getCategory().toString(), scope); - deleteByQueryCommandsCumulative = manager.meter(registry, "cumulativeDeletesByQuery", getCategory().toString(), scope); - numErrorsCumulative = manager.meter(registry, "cumulativeErrors", getCategory().toString(), scope); + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + this.registry = manager.registry(registryName); + commitCommands = manager.meter(this, registryName, "commits", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> commitTracker.getCommitCount(), true, "autoCommits", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> softCommitTracker.getCommitCount(), true, "softAutoCommits", getCategory().toString(), scope); + if (commitTracker.getDocsUpperBound() > 0) { + manager.registerGauge(this, registryName, () -> commitTracker.getDocsUpperBound(), true, "autoCommitMaxDocs", + getCategory().toString(), scope); + } + if (commitTracker.getTimeUpperBound() > 0) { + manager.registerGauge(this, registryName, () -> "" + commitTracker.getTimeUpperBound() + "ms", true, "autoCommitMaxTime", + getCategory().toString(), scope); + } + if (softCommitTracker.getDocsUpperBound() > 0) { + manager.registerGauge(this, registryName, () -> softCommitTracker.getDocsUpperBound(), true, "softAutoCommitMaxDocs", + getCategory().toString(), scope); + } + if (softCommitTracker.getTimeUpperBound() > 0) { + manager.registerGauge(this, registryName, () -> "" + softCommitTracker.getTimeUpperBound() + "ms", true, "softAutoCommitMaxTime", + getCategory().toString(), scope); + } + optimizeCommands = manager.meter(this, registryName, "optimizes", getCategory().toString(), scope); + rollbackCommands = manager.meter(this, registryName, "rollbacks", getCategory().toString(), scope); + splitCommands = manager.meter(this, registryName, "splits", getCategory().toString(), scope); + mergeIndexesCommands = manager.meter(this, registryName, "merges", getCategory().toString(), scope); + expungeDeleteCommands = manager.meter(this, registryName, "expungeDeletes", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> numDocsPending.longValue(), true, "docsPending", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> addCommands.longValue(), true, "adds", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> deleteByIdCommands.longValue(), true, "deletesById", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> 
deleteByQueryCommands.longValue(), true, "deletesByQuery", getCategory().toString(), scope); + manager.registerGauge(this, registryName, () -> numErrors.longValue(), true, "errors", getCategory().toString(), scope); + + addCommandsCumulative = manager.meter(this, registryName, "cumulativeAdds", getCategory().toString(), scope); + deleteByIdCommandsCumulative = manager.meter(this, registryName, "cumulativeDeletesById", getCategory().toString(), scope); + deleteByQueryCommandsCumulative = manager.meter(this, registryName, "cumulativeDeletesByQuery", getCategory().toString(), scope); + numErrorsCumulative = manager.meter(this, registryName, "cumulativeErrors", getCategory().toString(), scope); } private void deleteAll() throws IOException { @@ -755,7 +769,7 @@ public void newIndexWriter(boolean rollback) throws IOException { */ @Override public void rollback(RollbackUpdateCommand cmd) throws IOException { - if (core.getCoreDescriptor().getCoreContainer().isZooKeeperAware()) { + if (core.getCoreContainer().isZooKeeperAware()) { throw new UnsupportedOperationException("Rollback is currently not supported in SolrCloud mode. (SOLR-4895)"); } @@ -811,7 +825,7 @@ public void close() throws IOException { @Override public void closeWriter(IndexWriter writer) throws IOException { - assert TestInjection.injectNonGracefullClose(core.getCoreDescriptor().getCoreContainer()); + assert TestInjection.injectNonGracefullClose(core.getCoreContainer()); boolean clearRequestInfo = false; solrCoreState.getCommitLock().lock(); @@ -951,7 +965,7 @@ private void updateDocument(AddUpdateCommand cmd, IndexWriter writer, Term updat ///////////////////////////////////////////////////////////////////// - // SolrInfoMBean stuff: Statistics and Module Info + // SolrInfoBean stuff: Statistics and Module Info ///////////////////////////////////////////////////////////////////// @Override @@ -959,70 +973,11 @@ public String getName() { return DirectUpdateHandler2.class.getName(); } - @Override - public String getVersion() { - return SolrCore.version; - } - @Override public String getDescription() { return "Update handler that efficiently directly updates the on-disk main lucene index"; } - @Override - public String getSource() { - return null; - } - - @Override - public URL[] getDocs() { - return null; - } - - @Override - public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap(); - lst.add("commits", commitCommands.getCount()); - if (commitTracker.getDocsUpperBound() > 0) { - lst.add("autocommit maxDocs", commitTracker.getDocsUpperBound()); - } - if (commitTracker.getTimeUpperBound() > 0) { - lst.add("autocommit maxTime", "" + commitTracker.getTimeUpperBound() + "ms"); - } - lst.add("autocommits", commitTracker.getCommitCount()); - if (softCommitTracker.getDocsUpperBound() > 0) { - lst.add("soft autocommit maxDocs", softCommitTracker.getDocsUpperBound()); - } - if (softCommitTracker.getTimeUpperBound() > 0) { - lst.add("soft autocommit maxTime", "" + softCommitTracker.getTimeUpperBound() + "ms"); - } - lst.add("soft autocommits", softCommitTracker.getCommitCount()); - lst.add("optimizes", optimizeCommands.getCount()); - lst.add("rollbacks", rollbackCommands.getCount()); - lst.add("expungeDeletes", expungeDeleteCommands.getCount()); - lst.add("docsPending", numDocsPending.longValue()); - // pset.size() not synchronized, but it should be fine to access. 
- // lst.add("deletesPending", pset.size()); - lst.add("adds", addCommands.longValue()); - lst.add("deletesById", deleteByIdCommands.longValue()); - lst.add("deletesByQuery", deleteByQueryCommands.longValue()); - lst.add("errors", numErrors.longValue()); - lst.add("cumulative_adds", addCommandsCumulative.getCount()); - lst.add("cumulative_deletesById", deleteByIdCommandsCumulative.getCount()); - lst.add("cumulative_deletesByQuery", deleteByQueryCommandsCumulative.getCount()); - lst.add("cumulative_errors", numErrorsCumulative.getCount()); - if (this.ulog != null) { - lst.add("transaction_logs_total_size", ulog.getTotalLogsSize()); - lst.add("transaction_logs_total_number", ulog.getTotalLogsNumber()); - } - return lst; - } - - @Override - public String toString() { - return "DirectUpdateHandler2" + getStatistics(); - } - @Override public SolrCoreState getSolrCoreState() { return solrCoreState; diff --git a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java index 71e20d9f2608..7bb74d05bf90 100644 --- a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java +++ b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java @@ -37,7 +37,7 @@ import org.apache.solr.common.util.IOUtils; import org.apache.solr.core.PluginInfo; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.util.HdfsUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -262,7 +262,7 @@ public void init(UpdateHandler uhandler, SolrCore core) { } // initialize metrics - core.getCoreMetricManager().registerMetricProducer(SolrInfoMBean.Category.TLOG.toString(), this); + core.getCoreMetricManager().registerMetricProducer(SolrInfoBean.Category.TLOG.toString(), this); } @Override diff --git a/solr/core/src/java/org/apache/solr/update/PeerSync.java b/solr/core/src/java/org/apache/solr/update/PeerSync.java index 9470cca41be5..dfadb0cafdb7 100644 --- a/solr/core/src/java/org/apache/solr/update/PeerSync.java +++ b/solr/core/src/java/org/apache/solr/update/PeerSync.java @@ -43,7 +43,7 @@ import org.apache.solr.common.util.IOUtils; import org.apache.solr.common.util.StrUtils; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.handler.component.HttpShardHandlerFactory; import org.apache.solr.handler.component.ShardHandler; import org.apache.solr.handler.component.ShardHandlerFactory; @@ -151,25 +151,25 @@ public PeerSync(SolrCore core, List replicas, int nUpdates, boolean cant this.cantReachIsSuccess = cantReachIsSuccess; this.getNoVersionsIsSuccess = getNoVersionsIsSuccess; this.doFingerprint = doFingerprint && !("true".equals(System.getProperty("solr.disableFingerprint"))); - this.client = core.getCoreDescriptor().getCoreContainer().getUpdateShardHandler().getHttpClient(); + this.client = core.getCoreContainer().getUpdateShardHandler().getHttpClient(); this.onlyIfActive = onlyIfActive; uhandler = core.getUpdateHandler(); ulog = uhandler.getUpdateLog(); // TODO: close - shardHandlerFactory = (HttpShardHandlerFactory) core.getCoreDescriptor().getCoreContainer().getShardHandlerFactory(); + shardHandlerFactory = (HttpShardHandlerFactory) core.getCoreContainer().getShardHandlerFactory(); shardHandler = shardHandlerFactory.getShardHandler(client); - core.getCoreMetricManager().registerMetricProducer(SolrInfoMBean.Category.REPLICATION.toString(), this); + 
core.getCoreMetricManager().registerMetricProducer(SolrInfoBean.Category.REPLICATION.toString(), this); } public static final String METRIC_SCOPE = "peerSync"; @Override public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { - syncTime = manager.timer(registry, "time", scope, METRIC_SCOPE); - syncErrors = manager.counter(registry, "errors", scope, METRIC_SCOPE); - syncSkipped = manager.counter(registry, "skipped", scope, METRIC_SCOPE); + syncTime = manager.timer(null, registry, "time", scope, METRIC_SCOPE); + syncErrors = manager.counter(null, registry, "errors", scope, METRIC_SCOPE); + syncSkipped = manager.counter(null, registry, "skipped", scope, METRIC_SCOPE); } /** optional list of updates we had before possibly receiving new updates */ @@ -184,7 +184,7 @@ public long percentile(List arr, float frac) { // start of peersync related debug messages. includes the core name for correlation. private String msg() { - ZkController zkController = uhandler.core.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zkController = uhandler.core.getCoreContainer().getZkController(); String myURL = ""; @@ -882,7 +882,7 @@ private boolean handleUpdates(ShardResponse srsp) { /** Requests and applies recent updates from peers */ public static void sync(SolrCore core, List replicas, int nUpdates) { - ShardHandlerFactory shardHandlerFactory = core.getCoreDescriptor().getCoreContainer().getShardHandlerFactory(); + ShardHandlerFactory shardHandlerFactory = core.getCoreContainer().getShardHandlerFactory(); ShardHandler shardHandler = shardHandlerFactory.getShardHandler(); diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java index ed856040ec16..049d2286b759 100644 --- a/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java +++ b/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java @@ -39,7 +39,7 @@ import org.apache.solr.core.DirectoryFactory; import org.apache.solr.core.DirectoryFactory.DirContext; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.schema.IndexSchema; import org.slf4j.Logger; @@ -126,7 +126,7 @@ private SolrIndexWriter(SolrCore core, String name, String path, Directory direc infoStream = getConfig().getInfoStream(); this.directory = directory; numOpens.incrementAndGet(); - SolrMetricManager metricManager = core.getCoreDescriptor().getCoreContainer().getMetricManager(); + SolrMetricManager metricManager = core.getCoreContainer().getMetricManager(); String registry = core.getCoreMetricManager().getRegistryName(); if (config.metricsInfo != null && config.metricsInfo.initArgs != null) { Object v = config.metricsInfo.initArgs.get("majorMergeDocs"); @@ -151,20 +151,20 @@ private SolrIndexWriter(SolrCore core, String name, String path, Directory direc } if (mergeDetails) { mergeTotals = true; // override - majorMergedDocs = metricManager.meter(registry, "docs", SolrInfoMBean.Category.INDEX.toString(), "merge", "major"); - majorDeletedDocs = metricManager.meter(registry, "deletedDocs", SolrInfoMBean.Category.INDEX.toString(), "merge", "major"); + majorMergedDocs = metricManager.meter(null, registry, "docs", SolrInfoBean.Category.INDEX.toString(), "merge", "major"); + majorDeletedDocs = metricManager.meter(null, registry, "deletedDocs", SolrInfoBean.Category.INDEX.toString(), "merge", "major"); } 
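// Editor's sketch (not part of this patch): the registration calls above now take the owning
// SolrInfoBean (or null for unowned metrics) plus the registry name as their first arguments.
// A hypothetical caller with access to a SolrCore could register a meter and a gauge like this;
// "someScope", the "UPDATE" category string and the AtomicLong are illustrative placeholders,
// and Meter is com.codahale.metrics.Meter.
//
//   SolrMetricManager manager = core.getCoreContainer().getMetricManager();
//   String registryName = core.getCoreMetricManager().getRegistryName();
//   AtomicLong pending = new AtomicLong();
//   Meter commits = manager.meter(null, registryName, "commits", "UPDATE", "someScope");
//   manager.registerGauge(null, registryName, () -> pending.longValue(), true,
//       "docsPending", "UPDATE", "someScope");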
if (mergeTotals) { - minorMerge = metricManager.timer(registry, "minor", SolrInfoMBean.Category.INDEX.toString(), "merge"); - majorMerge = metricManager.timer(registry, "major", SolrInfoMBean.Category.INDEX.toString(), "merge"); - mergeErrors = metricManager.counter(registry, "errors", SolrInfoMBean.Category.INDEX.toString(), "merge"); - metricManager.registerGauge(registry, () -> runningMajorMerges.get(), true, "running", SolrInfoMBean.Category.INDEX.toString(), "merge", "major"); - metricManager.registerGauge(registry, () -> runningMinorMerges.get(), true, "running", SolrInfoMBean.Category.INDEX.toString(), "merge", "minor"); - metricManager.registerGauge(registry, () -> runningMajorMergesDocs.get(), true, "running.docs", SolrInfoMBean.Category.INDEX.toString(), "merge", "major"); - metricManager.registerGauge(registry, () -> runningMinorMergesDocs.get(), true, "running.docs", SolrInfoMBean.Category.INDEX.toString(), "merge", "minor"); - metricManager.registerGauge(registry, () -> runningMajorMergesSegments.get(), true, "running.segments", SolrInfoMBean.Category.INDEX.toString(), "merge", "major"); - metricManager.registerGauge(registry, () -> runningMinorMergesSegments.get(), true, "running.segments", SolrInfoMBean.Category.INDEX.toString(), "merge", "minor"); - flushMeter = metricManager.meter(registry, "flush", SolrInfoMBean.Category.INDEX.toString()); + minorMerge = metricManager.timer(null, registry, "minor", SolrInfoBean.Category.INDEX.toString(), "merge"); + majorMerge = metricManager.timer(null, registry, "major", SolrInfoBean.Category.INDEX.toString(), "merge"); + mergeErrors = metricManager.counter(null, registry, "errors", SolrInfoBean.Category.INDEX.toString(), "merge"); + metricManager.registerGauge(null, registry, () -> runningMajorMerges.get(), true, "running", SolrInfoBean.Category.INDEX.toString(), "merge", "major"); + metricManager.registerGauge(null, registry, () -> runningMinorMerges.get(), true, "running", SolrInfoBean.Category.INDEX.toString(), "merge", "minor"); + metricManager.registerGauge(null, registry, () -> runningMajorMergesDocs.get(), true, "running.docs", SolrInfoBean.Category.INDEX.toString(), "merge", "major"); + metricManager.registerGauge(null, registry, () -> runningMinorMergesDocs.get(), true, "running.docs", SolrInfoBean.Category.INDEX.toString(), "merge", "minor"); + metricManager.registerGauge(null, registry, () -> runningMajorMergesSegments.get(), true, "running.segments", SolrInfoBean.Category.INDEX.toString(), "merge", "major"); + metricManager.registerGauge(null, registry, () -> runningMinorMergesSegments.get(), true, "running.segments", SolrInfoBean.Category.INDEX.toString(), "merge", "minor"); + flushMeter = metricManager.meter(null, registry, "flush", SolrInfoBean.Category.INDEX.toString()); } } } diff --git a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java index cbfb0d5f1fca..49d2664c6494 100644 --- a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java +++ b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java @@ -19,14 +19,17 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; +import java.util.HashSet; +import java.util.Set; import java.util.Vector; +import com.codahale.metrics.MetricRegistry; import org.apache.solr.core.DirectoryFactory; import org.apache.solr.core.HdfsDirectoryFactory; import org.apache.solr.core.PluginInfo; import org.apache.solr.core.SolrCore; import org.apache.solr.core.SolrEventListener; -import 
org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.schema.FieldType; import org.apache.solr.schema.SchemaField; import org.apache.solr.util.plugin.SolrCoreAware; @@ -41,7 +44,7 @@ * @since solr 0.9 */ -public abstract class UpdateHandler implements SolrInfoMBean { +public abstract class UpdateHandler implements SolrInfoBean { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); protected final SolrCore core; @@ -55,6 +58,9 @@ public abstract class UpdateHandler implements SolrInfoMBean { protected final UpdateLog ulog; + protected Set metricNames = new HashSet<>(); + protected MetricRegistry registry; + private void parseEventListeners() { final Class clazz = SolrEventListener.class; final String label = "Event Listener"; @@ -221,4 +227,12 @@ public void registerOptimizeCallback( SolrEventListener listener ) public Category getCategory() { return Category.UPDATE; } + @Override + public Set getMetricNames() { + return metricNames; + } + @Override + public MetricRegistry getMetricRegistry() { + return registry; + } } diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java index 84a20052b999..c50add4a45e3 100644 --- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java +++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java @@ -57,7 +57,7 @@ import org.apache.solr.common.util.IOUtils; import org.apache.solr.core.PluginInfo; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.request.LocalSolrQueryRequest; @@ -403,7 +403,7 @@ public void init(UpdateHandler uhandler, SolrCore core) { } } - core.getCoreMetricManager().registerMetricProducer(SolrInfoMBean.Category.TLOG.toString(), this); + core.getCoreMetricManager().registerMetricProducer(SolrInfoBean.Category.TLOG.toString(), this); } @Override @@ -422,12 +422,12 @@ public void initializeMetrics(SolrMetricManager manager, String registry, String } }; - manager.registerGauge(registry, bufferedOpsGauge, true, "ops", scope, "buffered"); - manager.registerGauge(registry, () -> logs.size(), true, "logs", scope, "replay", "remaining"); - manager.registerGauge(registry, () -> getTotalLogsSize(), true, "bytes", scope, "replay", "remaining"); - applyingBufferedOpsMeter = manager.meter(registry, "ops", scope, "applyingBuffered"); - replayOpsMeter = manager.meter(registry, "ops", scope, "replay"); - manager.registerGauge(registry, () -> state.getValue(), true, "state", scope); + manager.registerGauge(null, registry, bufferedOpsGauge, true, "ops", scope, "buffered"); + manager.registerGauge(null, registry, () -> logs.size(), true, "logs", scope, "replay", "remaining"); + manager.registerGauge(null, registry, () -> getTotalLogsSize(), true, "bytes", scope, "replay", "remaining"); + applyingBufferedOpsMeter = manager.meter(null, registry, "ops", scope, "applyingBuffered"); + replayOpsMeter = manager.meter(null, registry, "ops", scope, "replay"); + manager.registerGauge(null, registry, () -> state.getValue(), true, "state", scope); } /** diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java index 9d4eb7d1eedf..20132e133d74 100644 --- 
a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java +++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java @@ -17,10 +17,12 @@ package org.apache.solr.update; import java.lang.invoke.MethodHandles; -import java.net.URL; +import java.util.HashSet; +import java.util.Set; import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; -import com.codahale.metrics.InstrumentedExecutorService; +import com.codahale.metrics.MetricRegistry; import org.apache.http.client.HttpClient; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; @@ -29,20 +31,20 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.util.ExecutorUtil; -import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SolrjNamedThreadFactory; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.util.stats.HttpClientMetricNameStrategy; import org.apache.solr.util.stats.InstrumentedHttpRequestExecutor; import org.apache.solr.util.stats.InstrumentedPoolingHttpClientConnectionManager; +import org.apache.solr.util.stats.MetricUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.solr.util.stats.InstrumentedHttpRequestExecutor.KNOWN_METRIC_NAME_STRATEGIES; -public class UpdateShardHandler implements SolrMetricProducer, SolrInfoMBean { +public class UpdateShardHandler implements SolrMetricProducer, SolrInfoBean { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -56,8 +58,7 @@ public class UpdateShardHandler implements SolrMetricProducer, SolrInfoMBean { private ExecutorService updateExecutor = ExecutorUtil.newMDCAwareCachedThreadPool( new SolrjNamedThreadFactory("updateExecutor")); - private ExecutorService recoveryExecutor = ExecutorUtil.newMDCAwareCachedThreadPool( - new SolrjNamedThreadFactory("recoveryExecutor")); + private ExecutorService recoveryExecutor; private final CloseableHttpClient client; @@ -65,6 +66,9 @@ public class UpdateShardHandler implements SolrMetricProducer, SolrInfoMBean { private final InstrumentedHttpRequestExecutor httpRequestExecutor; + private final Set metricNames = new HashSet<>(); + private MetricRegistry registry; + public UpdateShardHandler(UpdateShardHandlerConfig cfg) { clientConnectionManager = new InstrumentedPoolingHttpClientConnectionManager(HttpClientUtil.getSchemaRegisteryProvider().getSchemaRegistry()); if (cfg != null ) { @@ -96,6 +100,15 @@ public UpdateShardHandler(UpdateShardHandlerConfig cfg) { clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, cfg.getMaxUpdateConnectionsPerHost()); } log.debug("Created UpdateShardHandler HTTP client with params: {}", clientParams); + + ThreadFactory recoveryThreadFactory = new SolrjNamedThreadFactory("recoveryExecutor"); + if (cfg != null && cfg.getMaxRecoveryThreads() > 0) { + log.debug("Creating recoveryExecutor with pool size {}", cfg.getMaxRecoveryThreads()); + recoveryExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(cfg.getMaxRecoveryThreads(), recoveryThreadFactory); + } else { + log.debug("Creating recoveryExecutor with unbounded pool"); + recoveryExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(recoveryThreadFactory); + } } @Override @@ -104,20 +117,14 @@ public 
String getName() { } @Override - public String getVersion() { - return getClass().getPackage().getSpecificationVersion(); - } - - @Override - public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); String expandedScope = SolrMetricManager.mkName(scope, getCategory().name()); - clientConnectionManager.initializeMetrics(manager, registry, expandedScope); - httpRequestExecutor.initializeMetrics(manager, registry, expandedScope); - updateExecutor = new InstrumentedExecutorService(updateExecutor, - manager.registry(registry), + clientConnectionManager.initializeMetrics(manager, registryName, expandedScope); + httpRequestExecutor.initializeMetrics(manager, registryName, expandedScope); + updateExecutor = MetricUtils.instrumentedExecutorService(updateExecutor, this, registry, SolrMetricManager.mkName("updateExecutor", expandedScope, "threadPool")); - recoveryExecutor = new InstrumentedExecutorService(recoveryExecutor, - manager.registry(registry), + recoveryExecutor = MetricUtils.instrumentedExecutorService(recoveryExecutor, this, registry, SolrMetricManager.mkName("recoveryExecutor", expandedScope, "threadPool")); } @@ -132,18 +139,13 @@ public Category getCategory() { } @Override - public String getSource() { - return null; - } - - @Override - public URL[] getDocs() { - return new URL[0]; + public Set getMetricNames() { + return metricNames; } @Override - public NamedList getStatistics() { - return null; + public MetricRegistry getMetricRegistry() { + return registry; } public HttpClient getHttpClient() { diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java index d31ce502fcf8..a8b6953bb7a2 100644 --- a/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java +++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java @@ -23,11 +23,12 @@ public class UpdateShardHandlerConfig { public static final int DEFAULT_MAXUPDATECONNECTIONS = 100000; public static final int DEFAULT_MAXUPDATECONNECTIONSPERHOST = 100000; public static final String DEFAULT_METRICNAMESTRATEGY = "queryLessURLAndMethod"; + public static final int DEFAULT_MAXRECOVERYTHREADS = -1; public static final UpdateShardHandlerConfig DEFAULT = new UpdateShardHandlerConfig(DEFAULT_MAXUPDATECONNECTIONS, DEFAULT_MAXUPDATECONNECTIONSPERHOST, DEFAULT_DISTRIBUPDATESOTIMEOUT, DEFAULT_DISTRIBUPDATECONNTIMEOUT, - DEFAULT_METRICNAMESTRATEGY); + DEFAULT_METRICNAMESTRATEGY, DEFAULT_MAXRECOVERYTHREADS); private final int maxUpdateConnections; @@ -39,13 +40,16 @@ public class UpdateShardHandlerConfig { private final String metricNameStrategy; + private final int maxRecoveryThreads; + public UpdateShardHandlerConfig(int maxUpdateConnections, int maxUpdateConnectionsPerHost, int distributedSocketTimeout, int distributedConnectionTimeout, - String metricNameStrategy) { + String metricNameStrategy, int maxRecoveryThreads) { this.maxUpdateConnections = maxUpdateConnections; this.maxUpdateConnectionsPerHost = maxUpdateConnectionsPerHost; this.distributedSocketTimeout = distributedSocketTimeout; this.distributedConnectionTimeout = distributedConnectionTimeout; this.metricNameStrategy = metricNameStrategy; + this.maxRecoveryThreads = maxRecoveryThreads; } public int getMaxUpdateConnectionsPerHost() { @@ -67,4 +71,8 @@ public int 
getDistributedConnectionTimeout() { public String getMetricNameStrategy() { return metricNameStrategy; } + + public int getMaxRecoveryThreads() { + return maxRecoveryThreads; + } } diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java index 55d1fc8c032a..cb1b2fb68c6c 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java +++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java @@ -73,7 +73,7 @@ import org.apache.solr.common.util.Hash; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.Utils; -import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.CoreContainer; import org.apache.solr.handler.component.RealTimeGetComponent; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; @@ -311,17 +311,16 @@ public DistributedUpdateProcessor(SolrQueryRequest req, // this should always be used - see filterParams DistributedUpdateProcessorFactory.addParamToDistributedRequestWhitelist (this.req, UpdateParams.UPDATE_CHAIN, TEST_DISTRIB_SKIP_SERVERS, CommonParams.VERSION_FIELD); - - CoreDescriptor coreDesc = req.getCore().getCoreDescriptor(); - - this.zkEnabled = coreDesc.getCoreContainer().isZooKeeperAware(); - zkController = req.getCore().getCoreDescriptor().getCoreContainer().getZkController(); + + CoreContainer cc = req.getCore().getCoreContainer(); + + this.zkEnabled = cc.isZooKeeperAware(); + zkController = cc.getZkController(); if (zkEnabled) { - cmdDistrib = new SolrCmdDistributor(coreDesc.getCoreContainer().getUpdateShardHandler()); + cmdDistrib = new SolrCmdDistributor(cc.getUpdateShardHandler()); } //this.rsp = reqInfo != null ? 
reqInfo.getRsp() : null; - - cloudDesc = coreDesc.getCloudDescriptor(); + cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor(); if (cloudDesc != null) { collection = cloudDesc.getCollectionName(); @@ -597,7 +596,7 @@ private List getNodesByRoutingRules(ClusterState cstate, DocCollection col ZkStateReader.COLLECTION_PROP, collection, ZkStateReader.SHARD_ID_PROP, myShardId, "routeKey", routeKey + "!"); - SolrZkClient zkClient = req.getCore().getCoreDescriptor().getCoreContainer().getZkController().getZkClient(); + SolrZkClient zkClient = req.getCore().getCoreContainer().getZkController().getZkClient(); DistributedQueue queue = Overseer.getStateUpdateQueue(zkClient); queue.offer(Utils.toJSON(map)); } catch (KeeperException e) { @@ -948,7 +947,7 @@ private void doFinish() { Throwable rootCause = SolrException.getRootCause(error.e); log.error("Setting up to try to start recovery on replica {}", replicaUrl, rootCause); zkController.ensureReplicaInLeaderInitiatedRecovery( - req.getCore().getCoreDescriptor().getCoreContainer(), + req.getCore().getCoreContainer(), collection, shardId, stdNode.getNodeProps(), @@ -1302,7 +1301,7 @@ private long waitForDependentUpdates(AddUpdateCommand cmd, long versionOnUpdate, */ private UpdateCommand fetchFullUpdateFromLeader(AddUpdateCommand inplaceAdd, long versionOnUpdate) throws IOException { String id = inplaceAdd.getPrintableId(); - UpdateShardHandler updateShardHandler = inplaceAdd.getReq().getCore().getCoreDescriptor().getCoreContainer().getUpdateShardHandler(); + UpdateShardHandler updateShardHandler = inplaceAdd.getReq().getCore().getCoreContainer().getUpdateShardHandler(); ModifiableSolrParams params = new ModifiableSolrParams(); params.set(DISTRIB, false); params.set("getInputDocument", id); @@ -1742,7 +1741,7 @@ private void zkCheck() { // Streaming updates can delay shutdown and cause big update reorderings (new streams can't be // initiated, but existing streams carry on). This is why we check if the CC is shutdown. // See SOLR-8203 and loop HdfsChaosMonkeyNothingIsSafeTest (and check for inconsistent shards) to test. - if (req.getCore().getCoreDescriptor().getCoreContainer().isShutDown()) { + if (req.getCore().getCoreContainer().isShutDown()) { throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "CoreContainer is shutting down."); } @@ -1960,7 +1959,7 @@ public void finish() throws IOException { private List getCollectionUrls(SolrQueryRequest req, String collection) { - ClusterState clusterState = req.getCore().getCoreDescriptor() + ClusterState clusterState = req.getCore() .getCoreContainer().getZkController().getClusterState(); Map slices = clusterState.getSlicesMap(collection); if (slices == null) { diff --git a/solr/core/src/java/org/apache/solr/update/processor/DocExpirationUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/DocExpirationUpdateProcessorFactory.java index 332dba6bb647..c4234cbd20b3 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/DocExpirationUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/DocExpirationUpdateProcessorFactory.java @@ -454,7 +454,7 @@ public void run() { *

    */ private boolean iAmInChargeOfPeriodicDeletes() { - ZkController zk = core.getCoreDescriptor().getCoreContainer().getZkController(); + ZkController zk = core.getCoreContainer().getZkController(); if (null == zk) return true; diff --git a/solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java index 9c1a565605da..2f4de12a4480 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java +++ b/solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java @@ -134,7 +134,7 @@ public TolerantUpdateProcessor(SolrQueryRequest req, SolrQueryResponse rsp, Upda this.distribPhase = distribPhase; assert ! DistribPhase.FROMLEADER.equals(distribPhase); - this.zkController = this.req.getCore().getCoreDescriptor().getCoreContainer().getZkController(); + this.zkController = this.req.getCore().getCoreContainer().getZkController(); this.uniqueKeyField = this.req.getCore().getLatestSchema().getUniqueKeyField(); assert null != uniqueKeyField : "Factory didn't enforce uniqueKey field?"; } diff --git a/solr/core/src/java/org/apache/solr/util/JmxUtil.java b/solr/core/src/java/org/apache/solr/util/JmxUtil.java index 02a070d96912..f27a55e7efca 100644 --- a/solr/core/src/java/org/apache/solr/util/JmxUtil.java +++ b/solr/core/src/java/org/apache/solr/util/JmxUtil.java @@ -27,9 +27,6 @@ /** * Utility methods to find a MBeanServer. - * - * This was factored out from {@link org.apache.solr.core.JmxMonitoredMap} - * and can eventually replace the logic used there. */ public final class JmxUtil { diff --git a/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java b/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java index b79ec0c678b0..a60ada828baf 100644 --- a/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java +++ b/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java @@ -164,7 +164,7 @@ public String _format(LoggingEvent event) { sb.append(" " + core); } - zkController = core.getCoreDescriptor().getCoreContainer().getZkController(); + zkController = core.getCoreContainer().getZkController(); if (zkController != null) { if (info.url == null) { info.url = zkController.getBaseUrl() + "/" + core.getName(); diff --git a/solr/core/src/java/org/apache/solr/util/stats/InstrumentedPoolingHttpClientConnectionManager.java b/solr/core/src/java/org/apache/solr/util/stats/InstrumentedPoolingHttpClientConnectionManager.java index 7bcabf8c2556..58ec69e0c9bb 100644 --- a/solr/core/src/java/org/apache/solr/util/stats/InstrumentedPoolingHttpClientConnectionManager.java +++ b/solr/core/src/java/org/apache/solr/util/stats/InstrumentedPoolingHttpClientConnectionManager.java @@ -35,10 +35,10 @@ public InstrumentedPoolingHttpClientConnectionManager(Registry getTotalStats().getAvailable(), true, SolrMetricManager.mkName("availableConnections", scope)); + manager.registerGauge(null, registry, () -> getTotalStats().getAvailable(), true, SolrMetricManager.mkName("availableConnections", scope)); // this acquires a lock on the connection pool; remove if contention sucks - manager.registerGauge(registry, () -> getTotalStats().getLeased(), true, SolrMetricManager.mkName("leasedConnections", scope)); - manager.registerGauge(registry, () -> getTotalStats().getMax(), true, SolrMetricManager.mkName("maxConnections", scope)); - manager.registerGauge(registry, () -> getTotalStats().getPending(), true, SolrMetricManager.mkName("pendingConnections", scope)); + 
manager.registerGauge(null, registry, () -> getTotalStats().getLeased(), true, SolrMetricManager.mkName("leasedConnections", scope)); + manager.registerGauge(null, registry, () -> getTotalStats().getMax(), true, SolrMetricManager.mkName("maxConnections", scope)); + manager.registerGauge(null, registry, () -> getTotalStats().getPending(), true, SolrMetricManager.mkName("pendingConnections", scope)); } } diff --git a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java index 491932d1a8d1..a190a0f0085b 100644 --- a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java +++ b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java @@ -16,9 +16,18 @@ */ package org.apache.solr.util.stats; +import java.beans.BeanInfo; +import java.beans.IntrospectionException; +import java.beans.Introspector; +import java.beans.PropertyDescriptor; import java.lang.invoke.MethodHandles; +import java.lang.management.OperatingSystemMXBean; +import java.lang.management.PlatformManagedObject; +import java.lang.reflect.InvocationTargetException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.SortedSet; @@ -39,7 +48,7 @@ import com.codahale.metrics.Timer; import org.apache.solr.common.SolrInputDocument; import org.apache.solr.common.util.NamedList; -import org.apache.solr.common.util.SimpleOrderedMap; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.AggregateMetric; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,6 +85,20 @@ public class MetricUtils { static final String P999 = "p999"; static final String P999_MS = P999 + MS; + /** + * This filter can limit what properties of a metric are returned. + */ + public interface PropertyFilter { + PropertyFilter ALL = (name) -> true; + + /** + * Return only properties that match. + * @param name property name + * @return true if this property should be returned, false otherwise. + */ + boolean accept(String name); + } + /** * Adds metrics from a Timer to a NamedList, using well-known back-compat names. * @param lst The NamedList to add the metrics data to @@ -105,70 +128,30 @@ public static double nsToMs(double ns) { } /** - * Returns a NamedList representation of the given metric registry. Only those metrics - * are converted to NamedList which match at least one of the given MetricFilter instances. - * - * @param registry the {@link MetricRegistry} to be converted to NamedList - * @param shouldMatchFilters a list of {@link MetricFilter} instances. - * A metric must match any one of the filters from this list to be - * included in the output - * @param mustMatchFilter a {@link MetricFilter}. - * A metric must match this filter to be included in the output. - * @param skipHistograms discard any {@link Histogram}-s and histogram parts of {@link Timer}-s. - * @param compact use compact representation for counters and gauges. - * @param metadata optional metadata. If not null and not empty then this map will be added under a - * {@code _metadata_} key. 
- * @return a {@link NamedList} - */ - public static NamedList toNamedList(MetricRegistry registry, List shouldMatchFilters, - MetricFilter mustMatchFilter, boolean skipHistograms, - boolean skipAggregateValues, boolean compact, - Map metadata) { - NamedList result = new SimpleOrderedMap(); - toMaps(registry, shouldMatchFilters, mustMatchFilter, skipHistograms, skipAggregateValues, compact, (k, v) -> { - result.add(k, v); - }); - if (metadata != null && !metadata.isEmpty()) { - result.add("_metadata_", metadata); - } - return result; - } - - /** - * Returns a representation of the given metric registry as a list of {@link SolrInputDocument}-s. + * Provides a representation of the given metric registry as {@link SolrInputDocument}-s. Only those metrics - * are converted to NamedList which match at least one of the given MetricFilter instances. + * are converted which match at least one of the given MetricFilter instances. * - * @param registry the {@link MetricRegistry} to be converted to NamedList + * @param registry the {@link MetricRegistry} to be converted * @param shouldMatchFilters a list of {@link MetricFilter} instances. * A metric must match any one of the filters from this list to be * included in the output * @param mustMatchFilter a {@link MetricFilter}. * A metric must match this filter to be included in the output. + * @param propertyFilter limit what properties of a metric are returned * @param skipHistograms discard any {@link Histogram}-s and histogram parts of {@link Timer}-s. + * @param skipAggregateValues discard internal values of {@link AggregateMetric}-s. * @param compact use compact representation for counters and gauges. * @param metadata optional metadata. If not null and not empty then this map will be added under a * {@code _metadata_} key. 
- * @return a list of {@link SolrInputDocument}-s + * @param consumer consumer that accepts produced {@link SolrInputDocument}-s */ - public static List toSolrInputDocuments(MetricRegistry registry, List shouldMatchFilters, - MetricFilter mustMatchFilter, boolean skipHistograms, - boolean skipAggregateValues, boolean compact, - Map metadata) { - List result = new LinkedList<>(); - toSolrInputDocuments(registry, shouldMatchFilters, mustMatchFilter, skipHistograms, - skipAggregateValues, compact, metadata, doc -> { - result.add(doc); - }); - return result; - } - public static void toSolrInputDocuments(MetricRegistry registry, List shouldMatchFilters, - MetricFilter mustMatchFilter, boolean skipHistograms, + MetricFilter mustMatchFilter, PropertyFilter propertyFilter, boolean skipHistograms, boolean skipAggregateValues, boolean compact, Map metadata, Consumer consumer) { boolean addMetadata = metadata != null && !metadata.isEmpty(); - toMaps(registry, shouldMatchFilters, mustMatchFilter, skipHistograms, skipAggregateValues, compact, (k, v) -> { + toMaps(registry, shouldMatchFilters, mustMatchFilter, propertyFilter, skipHistograms, skipAggregateValues, compact, false, (k, v) -> { SolrInputDocument doc = new SolrInputDocument(); doc.setField(METRIC_NAME, k); toSolrInputDocument(null, doc, v); @@ -179,7 +162,13 @@ public static void toSolrInputDocuments(MetricRegistry registry, List shouldMatchFilters, - MetricFilter mustMatchFilter, boolean skipHistograms, boolean skipAggregateValues, - boolean compact, - BiConsumer consumer) { - Map metrics = registry.getMetrics(); - SortedSet names = registry.getNames(); + MetricFilter mustMatchFilter, PropertyFilter propertyFilter, + boolean skipHistograms, boolean skipAggregateValues, + boolean compact, boolean simple, + BiConsumer consumer) { + final Map metrics = registry.getMetrics(); + final SortedSet names = registry.getNames(); names.stream() .filter(s -> shouldMatchFilters.stream().anyMatch(metricFilter -> metricFilter.matches(s, metrics.get(s)))) .filter(s -> mustMatchFilter.matches(s, metrics.get(s))) .forEach(n -> { Metric metric = metrics.get(n); - if (metric instanceof Counter) { - Counter counter = (Counter) metric; - consumer.accept(n, convertCounter(counter, compact)); - } else if (metric instanceof Gauge) { - Gauge gauge = (Gauge) metric; - try { - consumer.accept(n, convertGauge(gauge, compact)); - } catch (InternalError ie) { - if (n.startsWith("memory.") && ie.getMessage().contains("Memory Pool not found")) { - LOG.warn("Error converting gauge '" + n + "', possible JDK bug: SOLR-10362", ie); - consumer.accept(n, null); - } else { - throw ie; - } - } - } else if (metric instanceof Meter) { - Meter meter = (Meter) metric; - consumer.accept(n, convertMeter(meter)); - } else if (metric instanceof Timer) { - Timer timer = (Timer) metric; - consumer.accept(n, convertTimer(timer, skipHistograms)); - } else if (metric instanceof Histogram) { - if (!skipHistograms) { - Histogram histogram = (Histogram) metric; - consumer.accept(n, convertHistogram(histogram)); - } - } else if (metric instanceof AggregateMetric) { - consumer.accept(n, convertAggregateMetric((AggregateMetric)metric, skipAggregateValues)); - } + convertMetric(n, metric, propertyFilter, skipHistograms, skipAggregateValues, compact, simple, consumer); }); } - static Map convertAggregateMetric(AggregateMetric metric, boolean skipAggregateValues) { - Map response = new LinkedHashMap<>(); - response.put("count", metric.size()); - response.put(MAX, metric.getMax()); - response.put(MIN, 
metric.getMin()); - response.put(MEAN, metric.getMean()); - response.put(STDDEV, metric.getStdDev()); - response.put(SUM, metric.getSum()); - if (!(metric.isEmpty() || skipAggregateValues)) { - Map values = new LinkedHashMap<>(); - response.put(VALUES, values); - metric.getValues().forEach((k, v) -> { - Map map = new LinkedHashMap<>(); - map.put("value", v.value); - map.put("updateCount", v.updateCount.get()); - values.put(k, map); - }); + /** + * Convert selected metrics from a registry into a map, with metrics in a compact AND simple format. + * @param registry registry + * @param names metric names + * @return map where keys are metric names (if they were present in the registry) and values are + * converted metrics in simplified format. + */ + public static Map convertMetrics(MetricRegistry registry, Collection names) { + final Map metrics = new HashMap<>(); + convertMetrics(registry, names, false, true, true, true, (k, v) -> metrics.put(k, v)); + return metrics; + } + + /** + * Convert selected metrics from a registry into maps (when compact==false) or + * flattened objects. + * @param registry registry + * @param names metric names + * @param skipHistograms discard any {@link Histogram}-s and histogram parts of {@link Timer}-s. + * @param skipAggregateValues discard internal values of {@link AggregateMetric}-s. + * @param compact use compact representation for counters and gauges. + * @param simple use simplified representation for complex metrics - instead of a (name, map) + * only the selected (name "." key, value) pairs will be produced. + * @param consumer consumer that accepts produced objects + */ + public static void convertMetrics(MetricRegistry registry, Collection names, + boolean skipHistograms, boolean skipAggregateValues, + boolean compact, boolean simple, + BiConsumer consumer) { + final Map metrics = registry.getMetrics(); + names.stream() + .forEach(n -> { + Metric metric = metrics.get(n); + convertMetric(n, metric, PropertyFilter.ALL, skipHistograms, skipAggregateValues, compact, simple, consumer); + }); + } + + /** + * Convert a single instance of metric into a map or flattened object. + * @param n metric name + * @param metric metric instance + * @param propertyFilter limit what properties of a metric are returned + * @param skipHistograms discard any {@link Histogram}-s and histogram parts of {@link Timer}-s. + * @param skipAggregateValues discard internal values of {@link AggregateMetric}-s. + * @param compact use compact representation for counters and gauges. + * @param simple use simplified representation for complex metrics - instead of a (name, map) + * only the selected (name "." key, value) pairs will be produced. 
+ * @param consumer consumer that accepts produced objects + */ + static void convertMetric(String n, Metric metric, PropertyFilter propertyFilter, boolean skipHistograms, boolean skipAggregateValues, + boolean compact, boolean simple, BiConsumer consumer) { + if (metric instanceof Counter) { + Counter counter = (Counter) metric; + convertCounter(n, counter, propertyFilter, compact, consumer); + } else if (metric instanceof Gauge) { + Gauge gauge = (Gauge) metric; + try { + convertGauge(n, gauge, propertyFilter, simple, compact, consumer); + } catch (InternalError ie) { + if (n.startsWith("memory.") && ie.getMessage().contains("Memory Pool not found")) { + LOG.warn("Error converting gauge '" + n + "', possible JDK bug: SOLR-10362", ie); + consumer.accept(n, null); + } else { + throw ie; + } + } + } else if (metric instanceof Meter) { + Meter meter = (Meter) metric; + convertMeter(n, meter, propertyFilter, simple, consumer); + } else if (metric instanceof Timer) { + Timer timer = (Timer) metric; + convertTimer(n, timer, propertyFilter, skipHistograms, simple, consumer); + } else if (metric instanceof Histogram) { + if (!skipHistograms) { + Histogram histogram = (Histogram) metric; + convertHistogram(n, histogram, propertyFilter, simple, consumer); + } + } else if (metric instanceof AggregateMetric) { + convertAggregateMetric(n, (AggregateMetric)metric, propertyFilter, skipAggregateValues, simple, consumer); + } + } + + /** + * Convert an instance of {@link AggregateMetric}. + * @param name metric name + * @param metric an instance of {@link AggregateMetric} + * @param propertyFilter limit what properties of a metric are returned + * @param skipAggregateValues discard internal values of {@link AggregateMetric}-s. + * @param simple use simplified representation for complex metrics - instead of a (name, map) + * only the selected (name "." key, value) pairs will be produced. + * @param consumer consumer that accepts produced objects + */ + static void convertAggregateMetric(String name, AggregateMetric metric, + PropertyFilter propertyFilter, + boolean skipAggregateValues, boolean simple, BiConsumer consumer) { + if (simple) { + if (propertyFilter.accept(MEAN)) { + consumer.accept(name + "." + MEAN, metric.getMean()); + } + } else { + Map response = new LinkedHashMap<>(); + BiConsumer filter = (k, v) -> { + if (propertyFilter.accept(k)) { + response.put(k, v); + } + }; + filter.accept("count", metric.size()); + filter.accept(MAX, metric.getMax()); + filter.accept(MIN, metric.getMin()); + filter.accept(MEAN, metric.getMean()); + filter.accept(STDDEV, metric.getStdDev()); + filter.accept(SUM, metric.getSum()); + if (!(metric.isEmpty() || skipAggregateValues)) { + Map values = new LinkedHashMap<>(); + response.put(VALUES, values); + metric.getValues().forEach((k, v) -> { + Map map = new LinkedHashMap<>(); + map.put("value", v.value); + map.put("updateCount", v.updateCount.get()); + values.put(k, map); + }); + } + if (!response.isEmpty()) { + consumer.accept(name, response); + } } - return response; } - static Map convertHistogram(Histogram histogram) { - Map response = new LinkedHashMap<>(); + /** + * Convert an instance of {@link Histogram}. NOTE: it's assumed that histogram contains non-time + * based values that don't require unit conversion. 
+ * @param name metric name + * @param histogram an instance of {@link Histogram} + * @param propertyFilter limit what properties of a metric are returned + * @param simple use simplified representation for complex metrics - instead of a (name, map) + * only the selected (name "." key, value) pairs will be produced. + * @param consumer consumer that accepts produced objects + */ + static void convertHistogram(String name, Histogram histogram, PropertyFilter propertyFilter, + boolean simple, BiConsumer consumer) { Snapshot snapshot = histogram.getSnapshot(); - response.put("count", histogram.getCount()); - // non-time based values - addSnapshot(response, snapshot, false); - return response; + if (simple) { + if (propertyFilter.accept(MEAN)) { + consumer.accept(name + "." + MEAN, snapshot.getMean()); + } + } else { + Map response = new LinkedHashMap<>(); + String prop = "count"; + if (propertyFilter.accept(prop)) { + response.put(prop, histogram.getCount()); + } + // non-time based values + addSnapshot(response, snapshot, propertyFilter, false); + if (!response.isEmpty()) { + consumer.accept(name, response); + } + } } // optionally convert ns to ms @@ -279,66 +383,258 @@ static double nsToMs(boolean convert, double value) { } // some snapshots represent time in ns, other snapshots represent raw values (eg. chunk size) - static void addSnapshot(Map response, Snapshot snapshot, boolean ms) { - response.put((ms ? MIN_MS: MIN), nsToMs(ms, snapshot.getMin())); - response.put((ms ? MAX_MS: MAX), nsToMs(ms, snapshot.getMax())); - response.put((ms ? MEAN_MS : MEAN), nsToMs(ms, snapshot.getMean())); - response.put((ms ? MEDIAN_MS: MEDIAN), nsToMs(ms, snapshot.getMedian())); - response.put((ms ? STDDEV_MS: STDDEV), nsToMs(ms, snapshot.getStdDev())); - response.put((ms ? P75_MS: P75), nsToMs(ms, snapshot.get75thPercentile())); - response.put((ms ? P95_MS: P95), nsToMs(ms, snapshot.get95thPercentile())); - response.put((ms ? P99_MS: P99), nsToMs(ms, snapshot.get99thPercentile())); - response.put((ms ? P999_MS: P999), nsToMs(ms, snapshot.get999thPercentile())); + static void addSnapshot(Map response, Snapshot snapshot, PropertyFilter propertyFilter, boolean ms) { + BiConsumer filter = (k, v) -> { + if (propertyFilter.accept(k)) { + response.put(k, v); + } + }; + filter.accept((ms ? MIN_MS: MIN), nsToMs(ms, snapshot.getMin())); + filter.accept((ms ? MAX_MS: MAX), nsToMs(ms, snapshot.getMax())); + filter.accept((ms ? MEAN_MS : MEAN), nsToMs(ms, snapshot.getMean())); + filter.accept((ms ? MEDIAN_MS: MEDIAN), nsToMs(ms, snapshot.getMedian())); + filter.accept((ms ? STDDEV_MS: STDDEV), nsToMs(ms, snapshot.getStdDev())); + filter.accept((ms ? P75_MS: P75), nsToMs(ms, snapshot.get75thPercentile())); + filter.accept((ms ? P95_MS: P95), nsToMs(ms, snapshot.get95thPercentile())); + filter.accept((ms ? P99_MS: P99), nsToMs(ms, snapshot.get99thPercentile())); + filter.accept((ms ? P999_MS: P999), nsToMs(ms, snapshot.get999thPercentile())); } - static Map convertTimer(Timer timer, boolean skipHistograms) { - Map response = new LinkedHashMap<>(); - response.put("count", timer.getCount()); - response.put("meanRate", timer.getMeanRate()); - response.put("1minRate", timer.getOneMinuteRate()); - response.put("5minRate", timer.getFiveMinuteRate()); - response.put("15minRate", timer.getFifteenMinuteRate()); - if (!skipHistograms) { - // time-based values in nanoseconds - addSnapshot(response, timer.getSnapshot(), true); + /** + * Convert a {@link Timer} to a map. 
+ * @param name metric name + * @param timer timer instance + * @param propertyFilter limit what properties of a metric are returned + * @param skipHistograms if true then discard the histogram part of the timer. + * @param simple use simplified representation for complex metrics - instead of a (name, map) + * only the selected (name "." key, value) pairs will be produced. + * @param consumer consumer that accepts produced objects + */ + public static void convertTimer(String name, Timer timer, PropertyFilter propertyFilter, boolean skipHistograms, + boolean simple, BiConsumer consumer) { + if (simple) { + String prop = "meanRate"; + if (propertyFilter.accept(prop)) { + consumer.accept(name + "." + prop, timer.getMeanRate()); + } + } else { + Map response = new LinkedHashMap<>(); + BiConsumer filter = (k, v) -> { + if (propertyFilter.accept(k)) { + response.put(k, v); + } + }; + filter.accept("count", timer.getCount()); + filter.accept("meanRate", timer.getMeanRate()); + filter.accept("1minRate", timer.getOneMinuteRate()); + filter.accept("5minRate", timer.getFiveMinuteRate()); + filter.accept("15minRate", timer.getFifteenMinuteRate()); + if (!skipHistograms) { + // time-based values in nanoseconds + addSnapshot(response, timer.getSnapshot(), propertyFilter, true); + } + if (!response.isEmpty()) { + consumer.accept(name, response); + } } - return response; } - static Map convertMeter(Meter meter) { - Map response = new LinkedHashMap<>(); - response.put("count", meter.getCount()); - response.put("meanRate", meter.getMeanRate()); - response.put("1minRate", meter.getOneMinuteRate()); - response.put("5minRate", meter.getFiveMinuteRate()); - response.put("15minRate", meter.getFifteenMinuteRate()); - return response; + /** + * Convert a {@link Meter} to a map. + * @param name metric name + * @param meter meter instance + * @param propertyFilter limit what properties of a metric are returned + * @param simple use simplified representation for complex metrics - instead of a (name, map) + * only the selected (name "." key, value) pairs will be produced. + * @param consumer consumer that accepts produced objects + */ + static void convertMeter(String name, Meter meter, PropertyFilter propertyFilter, boolean simple, BiConsumer consumer) { + if (simple) { + if (propertyFilter.accept("count")) { + consumer.accept(name + ".count", meter.getCount()); + } + } else { + Map response = new LinkedHashMap<>(); + BiConsumer filter = (k, v) -> { + if (propertyFilter.accept(k)) { + response.put(k, v); + } + }; + filter.accept("count", meter.getCount()); + filter.accept("meanRate", meter.getMeanRate()); + filter.accept("1minRate", meter.getOneMinuteRate()); + filter.accept("5minRate", meter.getFiveMinuteRate()); + filter.accept("15minRate", meter.getFifteenMinuteRate()); + if (!response.isEmpty()) { + consumer.accept(name, response); + } + } } - static Object convertGauge(Gauge gauge, boolean compact) { - if (compact) { - return gauge.getValue(); + /** + * Convert a {@link Gauge}. + * @param name metric name + * @param gauge gauge instance + * @param propertyFilter limit what properties of a metric are returned + * @param simple use simplified representation for complex metrics - instead of a (name, map) + * only the selected (name "." key, value) pairs will be produced. + * @param compact if true then only return {@link Gauge#getValue()}. If false + * then return a map with a "value" field. 
+ * @param consumer consumer that accepts produced objects + */ + static void convertGauge(String name, Gauge gauge, PropertyFilter propertyFilter, boolean simple, boolean compact, + BiConsumer consumer) { + if (compact || simple) { + Object o = gauge.getValue(); + if (o instanceof Map) { + if (simple) { + for (Map.Entry entry : ((Map)o).entrySet()) { + String prop = entry.getKey().toString(); + if (propertyFilter.accept(prop)) { + consumer.accept(name + "." + prop, entry.getValue()); + } + } + } else { + Map val = new HashMap<>(); + for (Map.Entry entry : ((Map)o).entrySet()) { + String prop = entry.getKey().toString(); + if (propertyFilter.accept(prop)) { + val.put(prop, entry.getValue()); + } + } + if (!val.isEmpty()) { + consumer.accept(name, val); + } + } + } else { + consumer.accept(name, o); + } } else { + Object o = gauge.getValue(); Map response = new LinkedHashMap<>(); - response.put("value", gauge.getValue()); - return response; + if (o instanceof Map) { + for (Map.Entry entry : ((Map)o).entrySet()) { + String prop = entry.getKey().toString(); + if (propertyFilter.accept(prop)) { + response.put(prop, entry.getValue()); + } + } + if (!response.isEmpty()) { + consumer.accept(name, Collections.singletonMap("value", response)); + } + } else { + if (propertyFilter.accept("value")) { + response.put("value", o); + consumer.accept(name, response); + } + } } } - static Object convertCounter(Counter counter, boolean compact) { + /** + * Convert a {@link Counter} + * @param counter counter instance + * @param propertyFilter limit what properties of a metric are returned + * @param compact if true then only return {@link Counter#getCount()}. If false + * then return a map with a "count" field. + */ + static void convertCounter(String name, Counter counter, PropertyFilter propertyFilter, boolean compact, BiConsumer consumer) { if (compact) { - return counter.getCount(); + consumer.accept(name, counter.getCount()); } else { - Map response = new LinkedHashMap<>(); - response.put("count", counter.getCount()); - return response; + if (propertyFilter.accept("count")) { + Map response = new LinkedHashMap<>(); + response.put("count", counter.getCount()); + consumer.accept(name, response); + } } } /** * Returns an instrumented wrapper over the given executor service. */ - public static ExecutorService instrumentedExecutorService(ExecutorService delegate, MetricRegistry metricRegistry, String scope) { + public static ExecutorService instrumentedExecutorService(ExecutorService delegate, SolrInfoBean info, MetricRegistry metricRegistry, String scope) { + if (info != null && info.getMetricNames() != null) { + info.getMetricNames().add(MetricRegistry.name(scope, "submitted")); + info.getMetricNames().add(MetricRegistry.name(scope, "running")); + info.getMetricNames().add(MetricRegistry.name(scope, "completed")); + info.getMetricNames().add(MetricRegistry.name(scope, "duration")); + } return new InstrumentedExecutorService(delegate, metricRegistry, scope); } + + /** + * Creates a set of metrics (gauges) that correspond to available bean properties for the provided MXBean. 
+ * @param obj an instance of MXBean + * @param intf MXBean interface, one of {@link PlatformManagedObject}-s + * @param consumer consumer for created names and metrics + * @param formal type + */ + public static void addMXBeanMetrics(T obj, Class intf, + String prefix, BiConsumer consumer) { + if (intf.isInstance(obj)) { + BeanInfo beanInfo; + try { + beanInfo = Introspector.getBeanInfo(intf, intf.getSuperclass(), Introspector.IGNORE_ALL_BEANINFO); + } catch (IntrospectionException e) { + LOG.warn("Unable to fetch properties of MXBean " + obj.getClass().getName()); + return; + } + for (final PropertyDescriptor desc : beanInfo.getPropertyDescriptors()) { + final String name = desc.getName(); + // test if it works at all + try { + desc.getReadMethod().invoke(obj); + // worked - consume it + final Gauge gauge = () -> { + try { + return desc.getReadMethod().invoke(obj); + } catch (InvocationTargetException ite) { + // ignore (some properties throw UOE) + return null; + } catch (IllegalAccessException e) { + return null; + } + }; + String metricName = MetricRegistry.name(prefix, name); + consumer.accept(metricName, gauge); + } catch (Exception e) { + // didn't work, skip it... + } + } + } + } + + /** + * These are well-known implementations of {@link java.lang.management.OperatingSystemMXBean}. + * Some of them provide additional useful properties beyond those declared by the interface. + */ + public static String[] OS_MXBEAN_CLASSES = new String[] { + OperatingSystemMXBean.class.getName(), + "com.sun.management.OperatingSystemMXBean", + "com.sun.management.UnixOperatingSystemMXBean", + "com.ibm.lang.management.OperatingSystemMXBean" + }; + + /** + * Creates a set of metrics (gauges) that correspond to available bean properties for the provided MXBean. + * @param obj an instance of MXBean + * @param interfaces interfaces that it may implement. Each interface will be tried in turn, and only + * if it exists and if it contains unique properties then they will be added as metrics. 
+ * @param prefix optional prefix for metric names + * @param consumer consumer for created names and metrics + * @param formal type + */ + public static void addMXBeanMetrics(T obj, String[] interfaces, + String prefix, BiConsumer consumer) { + for (String clazz : interfaces) { + try { + final Class intf = Class.forName(clazz) + .asSubclass(PlatformManagedObject.class); + MetricUtils.addMXBeanMetrics(obj, intf, null, consumer); + } catch (ClassNotFoundException e) { + // ignore + } + } + } } diff --git a/solr/core/src/resources/apispec/cluster.aliases.json b/solr/core/src/resources/apispec/cluster.aliases.json new file mode 100644 index 000000000000..9cffb7142378 --- /dev/null +++ b/solr/core/src/resources/apispec/cluster.aliases.json @@ -0,0 +1,12 @@ +{ + "documentation": "https://cwiki.apache.org/confluence/display/solr/Collections+API", + "description": "Provides a list of collection aliases.", + "methods": [ + "GET" + ], + "url": { + "paths": [ + "/cluster/aliases" + ] + } +} diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-point.xml b/solr/core/src/test-files/solr/collection1/conf/schema-point.xml index 1a936a04c643..ae6a11ecfef4 100644 --- a/solr/core/src/test-files/solr/collection1/conf/schema-point.xml +++ b/solr/core/src/test-files/solr/collection1/conf/schema-point.xml @@ -54,6 +54,7 @@ + @@ -66,6 +67,7 @@ + @@ -78,6 +80,7 @@ + @@ -90,6 +93,7 @@ + @@ -102,6 +106,7 @@ + diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-collapseqparser.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-collapseqparser.xml index ff19baaba1f4..3ac0b5082f51 100644 --- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-collapseqparser.xml +++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-collapseqparser.xml @@ -239,75 +239,14 @@ based HashBitset. --> - lowerpunctfilt + a_s - - default - lowerfilt - spellchecker1 - false - - + direct DirectSolrSpellChecker - lowerfilt + a_s 3 - - - wordbreak - solr.WordBreakSolrSpellChecker - lowerfilt - true - true - 10 - - - multipleFields - lowerfilt1and2 - spellcheckerMultipleFields - false - - - - jarowinkler - lowerfilt - - org.apache.lucene.search.spell.JaroWinklerDistance - spellchecker2 - - - - solr.FileBasedSpellChecker - external - spellings.txt - UTF-8 - spellchecker3 - - - - freq - lowerfilt - spellcheckerFreq - - freq - false - - - fqcn - lowerfilt - spellcheckerFQCN - org.apache.solr.spelling.SampleComparator - false - - - perDict - org.apache.solr.handler.component.DummyCustomParamSpellChecker - lowerfilt - + @@ -323,19 +262,6 @@ based HashBitset. --> - - - - false - - false - - 1 - - - spellcheck - - - - direct @@ -347,35 +273,6 @@ based HashBitset.
--> spellcheck - - - default - wordbreak - 20 - - - spellcheck - - - - - direct - wordbreak - 20 - - - spellcheck - - - - - dismax - lowerfilt1^1 - - - spellcheck - - diff --git a/solr/core/src/test-files/solr/solr-hiddensysprops.xml b/solr/core/src/test-files/solr/solr-hiddensysprops.xml new file mode 100644 index 000000000000..20e5aec21754 --- /dev/null +++ b/solr/core/src/test-files/solr/solr-hiddensysprops.xml @@ -0,0 +1,31 @@ + + + + + + + foo + bar + baz + + + + configured + + + diff --git a/solr/core/src/test-files/solr/solr-jmxreporter.xml b/solr/core/src/test-files/solr/solr-jmxreporter.xml new file mode 100644 index 000000000000..bb9d05de1428 --- /dev/null +++ b/solr/core/src/test-files/solr/solr-jmxreporter.xml @@ -0,0 +1,43 @@ + + + + + + ${urlScheme:} + ${socketTimeout:90000} + ${connTimeout:15000} + + + + 127.0.0.1 + ${hostPort:8983} + ${hostContext:solr} + ${solr.zkclienttimeout:30000} + ${genericCoreNodeNames:true} + ${leaderVoteWait:10000} + ${distribUpdateConnTimeout:45000} + ${distribUpdateSoTimeout:340000} + ${autoReplicaFailoverWaitAfterExpiration:10000} + ${autoReplicaFailoverWorkLoopDelay:10000} + ${autoReplicaFailoverBadNodeExpiration:60000} + + + + + + diff --git a/solr/core/src/test-files/solr/solr-solrreporter.xml b/solr/core/src/test-files/solr/solr-solrreporter.xml index db03e4218877..a66d9d096e10 100644 --- a/solr/core/src/test-files/solr/solr-solrreporter.xml +++ b/solr/core/src/test-files/solr/solr-solrreporter.xml @@ -38,6 +38,10 @@ + + + false + 5 UPDATE\./update/.*requests diff --git a/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java b/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java index f4a14dba419a..02ae888bd7d0 100644 --- a/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java +++ b/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java @@ -27,6 +27,8 @@ import java.util.List; import java.util.Map; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.LazyDocument; @@ -38,6 +40,7 @@ import org.apache.solr.common.util.NamedList; import org.apache.solr.core.SolrCore; import org.apache.solr.handler.RequestHandlerBase; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestHandler; @@ -122,10 +125,14 @@ public void testSomeStuff() throws Exception { assertNotNull(core.getRequestHandler("mock")); // test stats call - NamedList stats = core.getStatistics(); - assertEquals("collection1", stats.get("coreName")); - assertTrue(stats.get("refCount") != null); - + SolrMetricManager manager = core.getCoreContainer().getMetricManager(); + String registry = core.getCoreMetricManager().getRegistryName(); + Map metrics = manager.registry(registry).getMetrics(); + assertTrue(metrics.containsKey("CORE.coreName")); + assertTrue(metrics.containsKey("CORE.refCount")); + Gauge g = (Gauge)metrics.get("CORE.refCount"); + assertTrue(g.getValue().intValue() > 0); + lrf.args.put(CommonParams.VERSION,"2.2"); assertQ("test query on empty index", req("qlkciyopsbgzyvkylsjhchghjrdf") @@ -378,8 +385,6 @@ public void testRequestHandlerBaseException() { @Override public String getDescription() { return tmp; } @Override - public String getSource() { return tmp; } - @Override public void handleRequestBody ( SolrQueryRequest req, SolrQueryResponse rsp ) { throw new 
RuntimeException(tmp); diff --git a/solr/core/src/test/org/apache/solr/CursorPagingTest.java b/solr/core/src/test/org/apache/solr/CursorPagingTest.java index b204677f8c58..eb1c6bc6fe97 100644 --- a/solr/core/src/test/org/apache/solr/CursorPagingTest.java +++ b/solr/core/src/test/org/apache/solr/CursorPagingTest.java @@ -19,7 +19,6 @@ import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.SentinelIntSet; import org.apache.lucene.util.mutable.MutableValueInt; -import org.apache.solr.core.SolrInfoMBean; import org.apache.solr.common.SolrInputDocument; import org.apache.solr.common.params.CursorMarkParams; import org.apache.solr.common.params.SolrParams; @@ -32,6 +31,7 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.search.CursorMark; //jdoc import org.noggit.ObjectBuilder; @@ -521,16 +521,16 @@ public void testCacheImpacts() throws Exception { final Collection allFieldNames = getAllSortFieldNames(); - final SolrInfoMBean filterCacheStats - = h.getCore().getInfoRegistry().get("filterCache"); + final MetricsMap filterCacheStats = + (MetricsMap)h.getCore().getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.filterCache"); assertNotNull(filterCacheStats); - final SolrInfoMBean queryCacheStats - = h.getCore().getInfoRegistry().get("queryResultCache"); + final MetricsMap queryCacheStats = + (MetricsMap)h.getCore().getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.queryResultCache"); assertNotNull(queryCacheStats); - final long preQcIn = (Long) queryCacheStats.getStatistics().get("inserts"); - final long preFcIn = (Long) filterCacheStats.getStatistics().get("inserts"); - final long preFcHits = (Long) filterCacheStats.getStatistics().get("hits"); + final long preQcIn = (Long) queryCacheStats.getValue().get("inserts"); + final long preFcIn = (Long) filterCacheStats.getValue().get("inserts"); + final long preFcHits = (Long) filterCacheStats.getValue().get("hits"); SentinelIntSet ids = assertFullWalkNoDups (10, params("q", "*:*", @@ -542,9 +542,9 @@ public void testCacheImpacts() throws Exception { assertEquals(6, ids.size()); - final long postQcIn = (Long) queryCacheStats.getStatistics().get("inserts"); - final long postFcIn = (Long) filterCacheStats.getStatistics().get("inserts"); - final long postFcHits = (Long) filterCacheStats.getStatistics().get("hits"); + final long postQcIn = (Long) queryCacheStats.getValue().get("inserts"); + final long postFcIn = (Long) filterCacheStats.getValue().get("inserts"); + final long postFcHits = (Long) filterCacheStats.getValue().get("hits"); assertEquals("query cache inserts changed", preQcIn, postQcIn); // NOTE: use of pure negative filters causees "*:* to be tracked in filterCache diff --git a/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java b/solr/core/src/test/org/apache/solr/SolrInfoBeanTest.java similarity index 82% rename from solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java rename to solr/core/src/test/org/apache/solr/SolrInfoBeanTest.java index bfe231627ad8..d39c87fad0b1 100644 --- a/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java +++ b/solr/core/src/test/org/apache/solr/SolrInfoBeanTest.java @@ -16,11 +16,14 @@ */ package org.apache.solr; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.lucene.util.TestUtil; +import org.apache.solr.core.SolrInfoBean; import 
org.apache.solr.handler.StandardRequestHandler; import org.apache.solr.handler.admin.LukeRequestHandler; import org.apache.solr.handler.component.SearchComponent; import org.apache.solr.highlight.DefaultSolrHighlighter; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.search.LRUCache; import org.junit.BeforeClass; import java.io.File; @@ -33,7 +36,7 @@ /** * A simple test used to increase code coverage for some standard things... */ -public class SolrInfoMBeanTest extends SolrTestCaseJ4 +public class SolrInfoBeanTest extends SolrTestCaseJ4 { @BeforeClass public static void beforeClass() throws Exception { @@ -54,10 +57,16 @@ public void testCallMBeanInfo() throws Exception { // System.out.println(classes); int checked = 0; + SolrMetricManager metricManager = h.getCoreContainer().getMetricManager(); + String registry = h.getCore().getCoreMetricManager().getRegistryName(); + String scope = TestUtil.randomSimpleString(random(), 2, 10); for( Class clazz : classes ) { - if( SolrInfoMBean.class.isAssignableFrom( clazz ) ) { + if( SolrInfoBean.class.isAssignableFrom( clazz ) ) { try { - SolrInfoMBean info = (SolrInfoMBean)clazz.newInstance(); + SolrInfoBean info = (SolrInfoBean)clazz.newInstance(); + if (info instanceof SolrMetricProducer) { + ((SolrMetricProducer)info).initializeMetrics(metricManager, registry, scope); + } //System.out.println( info.getClass() ); assertNotNull( info.getName() ); @@ -69,9 +78,6 @@ public void testCallMBeanInfo() throws Exception { } assertNotNull( info.toString() ); - // increase code coverage... - assertNotNull( info.getDocs() + "" ); - assertNotNull( info.getStatistics()+"" ); checked++; } catch( InstantiationException ex ) { @@ -80,7 +86,7 @@ public void testCallMBeanInfo() throws Exception { } } } - assertTrue( "there are at least 10 SolrInfoMBean that should be found in the classpath, found " + checked, checked > 10 ); + assertTrue( "there are at least 10 SolrInfoBean that should be found in the classpath, found " + checked, checked > 10 ); } private static List getClassesForPackage(String pckgname) throws Exception { diff --git a/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java b/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java index 885b5003f680..6818f8b4621f 100644 --- a/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java +++ b/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java @@ -155,7 +155,6 @@ public void test() throws Exception { // The second sort = id asc . The sorting behaviour is different in dist mode. 
See TopDocs#merge // The shard the result came from matters in the order if both document sortvalues are equal query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", -1, "sort", i1 + " asc, id asc"); - query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", 10, "sort", i1 + " asc, id asc"); query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", 0, "sort", i1 + " asc, id asc"); query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", -1, "sort", "id asc, _docid_ asc"); query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", -1, "sort", "{!func}add(" + i1 + ",5) asc, id asc"); @@ -163,8 +162,6 @@ public void test() throws Exception { query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", -1, "sort", i1 + " asc, id asc", "stats", "true", "stats.field", tlong); query("q", "kings", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", -1, "sort", i1 + " asc, id asc", "spellcheck", "true", "spellcheck.build", "true", "qt", "spellCheckCompRH"); query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", -1, "sort", i1 + " asc, id asc", "facet", "true", "hl","true","hl.fl",t1); - - query("q", "*:*", "rows", 10, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", 2, "sort", i1 + " asc, id asc", "group.sort", "score desc,id desc"); query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", -1, "sort", i1 + " asc, id asc", "group.sort", "id desc"); query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.offset", 5, "group.limit", -1, "sort", i1 + " asc, id asc"); @@ -283,7 +280,6 @@ public void test() throws Exception { "group.field", i1, "group.limit", -1, "sort", tlong+" asc, id desc", "group.sort", "id asc"); - rsp = query("q", "{!func}id", "fq", oddField+":[* TO *]", "rows", 100, "fl", tlong + ",id," + i1, "group", "true", "group.field", i1, "group.limit", -1, @@ -300,20 +296,29 @@ public void test() throws Exception { assertEquals(docs.toString(), 22, docs.get(0).getFirstValue("id")); assertEquals(docs.toString(), 21, docs.get(4).getFirstValue("id")); + + // Can't validate the response, but can check if no errors occur. 
simpleQuery("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.query", t1 + ":kings OR " + t1 + ":eggs", "group.limit", 10, "sort", i1 + " asc, id asc", CommonParams.TIME_ALLOWED, 1); //Debug simpleQuery("q", "*:*", "rows", 10, "fl", "id," + i1, "group", "true", "group.field", i1, "debug", "true"); - // sorting on user-defined field - query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", 10, "sort", i1 + " asc, abs(sub(5,id)) asc, id asc", "group.sort", "abs(sub(7,id)) asc,id desc"); - - - // sorting on user-defined field - query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", 10, "sort", i1 + " asc, abs(sub(5,id)) asc, id asc", "group.sort", "abs(sub(7,id)) desc,score desc, id asc"); - - + // SOLR-6203 sorting on user-defined field + { + // abs(sub(7,id)) is the user-defined sort field, can be ascending or descending + // (ascending or descending) score may or may not be included + // id can be ascending or descending + String groupSort = "abs(sub(7,id)) "+(random().nextBoolean() ? "asc" : "desc"); + if (random().nextBoolean()) { + groupSort += ", score desc"; + } + groupSort += (", id "+(random().nextBoolean() ? "asc" : "desc")); + query("q", "*:*", "rows", 100, "fl", "id," + i1, + "group", "true", "group.field", i1, "group.limit", 10, + "sort", i1 + " asc, abs(sub(5,id)) asc, id asc", + "group.sort", groupSort); + } } private void simpleQuery(Object... queryParams) throws SolrServerException, IOException { diff --git a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java index e65972764ee6..2d465519fedc 100644 --- a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java +++ b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java @@ -838,7 +838,7 @@ public void testRandomGrouping() throws Exception { Object realResponse = ObjectBuilder.fromJSON(strResponse); String err = JSONTestUtil.matchObj("/grouped/" + groupField, realResponse, modelResponse); if (err != null) { - log.error("GROUPING MISMATCH: " + err + log.error("GROUPING MISMATCH (" + queryIter + "): " + err + "\n\trequest="+req + "\n\tresult="+strResponse + "\n\texpected="+ JSONUtil.toJSON(modelResponse) @@ -854,7 +854,7 @@ public void testRandomGrouping() throws Exception { // assert post / pre grouping facets err = JSONTestUtil.matchObj("/facet_counts/facet_fields/"+FOO_STRING_FIELD, realResponse, expectedFacetResponse); if (err != null) { - log.error("GROUPING MISMATCH: " + err + log.error("GROUPING MISMATCH (" + queryIter + "): " + err + "\n\trequest="+req + "\n\tresult="+strResponse + "\n\texpected="+ JSONUtil.toJSON(expectedFacetResponse) diff --git a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java index 6ca072b1ba84..869650df121e 100644 --- a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java @@ -57,6 +57,10 @@ public void test() throws Exception { CollectionAdminRequest.createAlias("testalias", "collection1").process(cluster.getSolrClient()); + // ensure that the alias has been registered + assertEquals("collection1", + new CollectionAdminRequest.ListAliases().process(cluster.getSolrClient()).getAliases().get("testalias")); + // search for alias QueryResponse res = cluster.getSolrClient().query("testalias", new SolrQuery("*:*")); assertEquals(3, 
res.getResults().getNumFound()); diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java index d1dbe9c4319c..1c23c9cf678e 100644 --- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java @@ -59,6 +59,7 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; +import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -752,19 +753,28 @@ private void testNumberOfCommitsWithCommitAfterAdd() private Long getNumCommits(HttpSolrClient sourceClient) throws SolrServerException, IOException { - try (HttpSolrClient client = getHttpSolrClient(sourceClient.getBaseURL())) { + // construct the /admin/metrics URL + URL url = new URL(sourceClient.getBaseURL()); + String path = url.getPath().substring(1); + String[] elements = path.split("/"); + String collection = elements[elements.length - 1]; + String urlString = url.toString(); + urlString = urlString.substring(0, urlString.length() - collection.length() - 1); + try (HttpSolrClient client = getHttpSolrClient(urlString)) { client.setConnectionTimeout(15000); client.setSoTimeout(60000); ModifiableSolrParams params = new ModifiableSolrParams(); - params.set("qt", "/admin/mbeans?key=updateHandler&stats=true"); + //params.set("qt", "/admin/metrics?prefix=UPDATE.updateHandler®istry=solr.core." + collection); + params.set("qt", "/admin/metrics"); + params.set("prefix", "UPDATE.updateHandler"); + params.set("registry", "solr.core." + collection); // use generic request to avoid extra processing of queries QueryRequest req = new QueryRequest(params); NamedList resp = client.request(req); - NamedList mbeans = (NamedList) resp.get("solr-mbeans"); - NamedList uhandlerCat = (NamedList) mbeans.get("UPDATE"); - NamedList uhandler = (NamedList) uhandlerCat.get("updateHandler"); - NamedList stats = (NamedList) uhandler.get("stats"); - return (Long) stats.get("commits"); + NamedList metrics = (NamedList) resp.get("metrics"); + NamedList uhandlerCat = (NamedList) metrics.getVal(0); + Map commits = (Map) uhandlerCat.get("UPDATE.updateHandler.commits"); + return (Long) commits.get("count"); } } diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java index 26fa3257398b..f48f76b5736a 100644 --- a/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java @@ -16,11 +16,14 @@ */ package org.apache.solr.cloud; +import java.util.Map; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.solr.common.SolrException; import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.common.util.NamedList; import org.apache.solr.core.SolrCore; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; @@ -158,11 +161,11 @@ public void testBasic() throws Exception { } // test stats call - NamedList stats = core.getStatistics(); - assertEquals("collection1", stats.get("coreName")); - assertEquals("collection1", stats.get("collection")); - assertEquals("shard1", stats.get("shard")); - assertTrue(stats.get("refCount") != null); + Map metrics = h.getCore().getCoreMetricManager().getRegistry().getMetrics(); + 
assertEquals("collection1", ((Gauge)metrics.get("CORE.coreName")).getValue()); + assertEquals("collection1", ((Gauge)metrics.get("CORE.collection")).getValue()); + assertEquals("shard1", ((Gauge)metrics.get("CORE.shard")).getValue()); + assertTrue(metrics.get("CORE.refCount") != null); //zkController.getZkClient().printLayoutToStdOut(); } diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java index 7925358e6d22..ed9ed41b011b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java @@ -20,6 +20,7 @@ import javax.management.MBeanServerFactory; import javax.management.ObjectName; import java.io.IOException; +import java.lang.invoke.MethodHandles; import java.lang.management.ManagementFactory; import java.nio.file.Files; import java.nio.file.Path; @@ -37,6 +38,7 @@ import java.util.concurrent.TimeUnit; import com.google.common.collect.ImmutableList; +import org.apache.commons.io.IOUtils; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.lucene.util.TestUtil; import org.apache.solr.client.solrj.SolrClient; @@ -68,12 +70,14 @@ import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.core.CoreContainer; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean.Category; +import org.apache.solr.core.SolrInfoBean.Category; import org.apache.solr.util.TestInjection; import org.apache.solr.util.TimeOut; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP; import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR; @@ -83,6 +87,7 @@ */ @Slow public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @BeforeClass public static void beforeCollectionsAPIDistributedZkTest() { @@ -94,9 +99,11 @@ public static void beforeCollectionsAPIDistributedZkTest() { @BeforeClass public static void setupCluster() throws Exception { + String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8"); configureCluster(4) .addConfig("conf", configset("cloud-minimal")) .addConfig("conf2", configset("cloud-minimal-jmx")) + .withSolrXml(solrXml) .configure(); } @@ -549,7 +556,7 @@ private void checkInstanceDirs(JettySolrRunner jetty) throws IOException { for (SolrCore core : theCores) { // look for core props file - Path instancedir = (Path) core.getStatistics().get("instanceDir"); + Path instancedir = (Path) core.getResourceLoader().getInstancePath(); assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties"))); Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName()); @@ -620,25 +627,22 @@ private void checkNoTwoShardsUseTheSameIndexDir() throws Exception { Set mbeans = new HashSet<>(); mbeans.addAll(server.queryNames(null, null)); for (final ObjectName mbean : mbeans) { - Object value; - Object indexDir; - Object name; try { - if (((value = server.getAttribute(mbean, "category")) != null && value - .toString().equals(Category.CORE.toString())) - && ((indexDir = server.getAttribute(mbean, "coreName")) != 
null) - && ((indexDir = server.getAttribute(mbean, "indexDir")) != null) - && ((name = server.getAttribute(mbean, "name")) != null)) { - if (!indexDirToShardNamesMap.containsKey(indexDir.toString())) { - indexDirToShardNamesMap.put(indexDir.toString(), - new HashSet()); + Map props = mbean.getKeyPropertyList(); + String category = props.get("category"); + String name = props.get("name"); + if ((category != null && category.toString().equals(Category.CORE.toString())) && + (name != null && name.equals("indexDir"))) { + String indexDir = server.getAttribute(mbean, "Value").toString(); + String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4"); + if (!indexDirToShardNamesMap.containsKey(indexDir)) { + indexDirToShardNamesMap.put(indexDir.toString(), new HashSet<>()); } - indexDirToShardNamesMap.get(indexDir.toString()).add( - name.toString()); + indexDirToShardNamesMap.get(indexDir.toString()).add(key); } } catch (Exception e) { - // ignore, just continue - probably a "category" or "source" attribute + // ignore, just continue - probably a "Value" attribute // not found } } diff --git a/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java b/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java index b6754c71bf63..d2d6a16f3352 100644 --- a/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java @@ -113,13 +113,15 @@ public void testDistributedQueueBlocking() throws Exception { // After draining the queue, a watcher should be set. assertNull(dq.peek(100)); - assertTrue(dq.hasWatcher()); + assertFalse(dq.isDirty()); + assertEquals(1, dq.watcherCount()); forceSessionExpire(); // Session expiry should have fired the watcher. Thread.sleep(100); - assertFalse(dq.hasWatcher()); + assertTrue(dq.isDirty()); + assertEquals(0, dq.watcherCount()); // Rerun the earlier test make sure updates are still seen, post reconnection. 
future = executor.submit(() -> new String(dq.peek(true), UTF8)); @@ -137,6 +139,50 @@ public void testDistributedQueueBlocking() throws Exception { assertNull(dq.poll()); } + @Test + public void testLeakChildWatcher() throws Exception { + String dqZNode = "/distqueue/test"; + DistributedQueue dq = makeDistributedQueue(dqZNode); + assertTrue(dq.peekElements(1, 1, s1 -> true).isEmpty()); + assertEquals(1, dq.watcherCount()); + assertFalse(dq.isDirty()); + assertTrue(dq.peekElements(1, 1, s1 -> true).isEmpty()); + assertEquals(1, dq.watcherCount()); + assertFalse(dq.isDirty()); + assertNull(dq.peek()); + assertEquals(1, dq.watcherCount()); + assertFalse(dq.isDirty()); + assertNull(dq.peek(10)); + assertEquals(1, dq.watcherCount()); + assertFalse(dq.isDirty()); + + dq.offer("hello world".getBytes(UTF8)); + assertNotNull(dq.peek()); // synchronously available + // dirty and watcher state indeterminate here, race with watcher + Thread.sleep(100); // watcher should have fired now + assertNotNull(dq.peek()); + assertEquals(1, dq.watcherCount()); + assertFalse(dq.isDirty()); + assertFalse(dq.peekElements(1, 1, s -> true).isEmpty()); + assertEquals(1, dq.watcherCount()); + assertFalse(dq.isDirty()); + } + + @Test + public void testLocallyOffer() throws Exception { + String dqZNode = "/distqueue/test"; + DistributedQueue dq = makeDistributedQueue(dqZNode); + dq.peekElements(1, 1, s -> true); + for (int i = 0; i < 100; i++) { + byte[] data = String.valueOf(i).getBytes(UTF8); + dq.offer(data); + assertNotNull(dq.peek()); + dq.poll(); + dq.peekElements(1, 1, s -> true); + } + } + + @Test public void testPeekElements() throws Exception { String dqZNode = "/distqueue/test"; diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java index 9441e3ff1ef0..9100eee67f4b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java @@ -18,7 +18,6 @@ import java.io.File; import java.lang.invoke.MethodHandles; -import java.net.ServerSocket; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -71,14 +70,6 @@ public JettySolrRunner createJetty(File solrHome, String dataDir, return createProxiedJetty(solrHome, dataDir, shardList, solrConfigOverride, schemaOverride); } - protected int getNextAvailablePort() throws Exception { - int port = -1; - try (ServerSocket s = new ServerSocket(0)) { - port = s.getLocalPort(); - } - return port; - } - @Test public void test() throws Exception { log.info("replication factor test running"); diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java index 9d415bcea443..8c49f6b8ca4b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java @@ -18,24 +18,40 @@ import java.io.File; import java.io.FilenameFilter; +import java.io.StringReader; import java.lang.invoke.MethodHandles; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import org.apache.commons.cli.CommandLine; +import org.apache.http.HttpEntity; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.util.EntityUtils; import 
org.apache.solr.client.solrj.SolrQuery; +import org.apache.solr.client.solrj.impl.CloudSolrClient; import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest; import org.apache.solr.client.solrj.response.QueryResponse; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Replica; +import org.apache.solr.common.cloud.Slice; +import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.util.ExternalPaths; import org.apache.solr.util.SolrCLI; import org.junit.Test; +import org.noggit.JSONParser; +import org.noggit.ObjectBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static java.util.Arrays.asList; +import static org.apache.solr.common.util.Utils.getObjectByPath; + /** * Emulates bin/solr -e cloud -noprompt; bin/post -c gettingstarted example/exampledocs/*.xml; * this test is useful for catching regressions in indexing the example docs in collections that @@ -129,10 +145,16 @@ public boolean accept(File dir, String name) { cloudClient.request(req); } cloudClient.commit(); - Thread.sleep(1000); - QueryResponse qr = cloudClient.query(new SolrQuery("*:*")); - int numFound = (int)qr.getResults().getNumFound(); + int numFound = 0; + + // give the update a chance to take effect. + for (int idx = 0; idx < 100; ++idx) { + QueryResponse qr = cloudClient.query(new SolrQuery("*:*")); + numFound = (int) qr.getResults().getNumFound(); + if (numFound == expectedXmlDocCount) break; + Thread.sleep(100); + } assertEquals("*:* found unexpected number of documents", expectedXmlDocCount, numFound); log.info("Updating Config for " + testCollectionName); @@ -192,6 +214,9 @@ protected void doTestConfigUpdate(String testCollectionName, String solrUrl) thr "-value", maxTime.toString(), "-solrUrl", solrUrl }; + + Map startTimes = getSoftAutocommitInterval(testCollectionName); + SolrCLI.ConfigTool tool = new SolrCLI.ConfigTool(); CommandLine cli = SolrCLI.processCommandLineArgs(SolrCLI.joinCommonAndToolOptions(tool.getOptions()), args); log.info("Sending set-property '" + prop + "'=" + maxTime + " to SolrCLI.ConfigTool."); @@ -201,5 +226,55 @@ protected void doTestConfigUpdate(String testCollectionName, String solrUrl) thr maxTimeFromConfig = SolrCLI.atPath("/config/updateHandler/autoSoftCommit/maxTime", configJson); assertNotNull(maxTimeFromConfig); assertEquals(maxTime, maxTimeFromConfig); + + log.info("live_nodes_count : " + cloudClient.getZkStateReader().getClusterState().getLiveNodes()); + + // Since it takes some time for this command to complete we need to make sure all the reloads for + // all the cores have been done. + boolean allGood = false; + Map curSoftCommitInterval = null; + for (int idx = 0; idx < 600 && allGood == false; ++idx) { + curSoftCommitInterval = getSoftAutocommitInterval(testCollectionName); + if (curSoftCommitInterval.size() > 0 && curSoftCommitInterval.size() == startTimes.size()) { // no point in even trying if they're not the same size! + allGood = true; + for (Map.Entry currEntry : curSoftCommitInterval.entrySet()) { + if (currEntry.getValue().equals(maxTime) == false) { + allGood = false; + } + } + } + if (allGood == false) { + Thread.sleep(100); + } + } + assertTrue("All cores should have been reloaded within 60 seconds!!!", allGood); + } + + // Collect all of the autoSoftCommit intervals. 
+ private Map getSoftAutocommitInterval(String collection) throws Exception { + Map ret = new HashMap<>(); + DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection(collection); + for (Slice slice : coll.getActiveSlices()) { + for (Replica replica : slice.getReplicas()) { + String uri = "" + replica.get(ZkStateReader.BASE_URL_PROP) + "/" + replica.get(ZkStateReader.CORE_NAME_PROP) + "/config?wt=json"; + Map respMap = getAsMap(cloudClient, uri); + Long maxTime = (Long) (getObjectByPath(respMap, true, asList("config", "updateHandler", "autoSoftCommit", "maxTime"))); + ret.put(replica.getCoreName(), maxTime); + } + } + return ret; } + + private Map getAsMap(CloudSolrClient cloudClient, String uri) throws Exception { + HttpGet get = new HttpGet(uri); + HttpEntity entity = null; + try { + entity = cloudClient.getLbClient().getHttpClient().execute(get).getEntity(); + String response = EntityUtils.toString(entity, StandardCharsets.UTF_8); + return (Map) ObjectBuilder.getVal(new JSONParser(new StringReader(response))); + } finally { + EntityUtils.consumeQuietly(entity); + } + } + } diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java index 1c1c5c1ca337..d7b9d8aeaeca 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Map; +import com.codahale.metrics.Counter; import org.apache.lucene.util.TestUtil; import org.apache.solr.BaseDistributedSearchTestCase; import org.apache.solr.SolrTestCaseJ4; @@ -39,6 +40,7 @@ import org.apache.solr.common.util.Utils; import org.apache.solr.core.CoreContainer; import org.apache.solr.core.SolrCore; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.request.SolrRequestHandler; import org.junit.Test; import org.slf4j.Logger; @@ -109,10 +111,13 @@ private void testRequestTracking() throws Exception { Map shardVsCount = new HashMap<>(); for (JettySolrRunner runner : jettys) { CoreContainer container = runner.getCoreContainer(); + SolrMetricManager metricManager = container.getMetricManager(); for (SolrCore core : container.getCores()) { + String registry = core.getCoreMetricManager().getRegistryName(); + Counter cnt = metricManager.counter(null, registry, "requests", "QUERY.standard"); SolrRequestHandler select = core.getRequestHandler(""); - long c = (long) select.getStatistics().get("requests"); - shardVsCount.put(core.getName(), (int) c); +// long c = (long) select.getStatistics().get("requests"); + shardVsCount.put(core.getName(), (int) cnt.getCount()); } } @@ -190,6 +195,10 @@ private void testQueryAgainstDownReplica() throws Exception { } assertNotNull(leaderCore); + SolrMetricManager leaderMetricManager = leaderCore.getCoreContainer().getMetricManager(); + String leaderRegistry = leaderCore.getCoreMetricManager().getRegistryName(); + Counter cnt = leaderMetricManager.counter(null, leaderRegistry, "requests", "QUERY.standard"); + // All queries should be served by the active replica // To make sure that's true we keep querying the down replica // If queries are getting processed by the down replica then the cluster state hasn't updated for that replica @@ -200,8 +209,7 @@ private void testQueryAgainstDownReplica() throws Exception { count++; client.query(new SolrQuery("*:*")); - SolrRequestHandler select = 
leaderCore.getRequestHandler(""); - long c = (long) select.getStatistics().get("requests"); + long c = cnt.getCount(); if (c == 1) { break; // cluster state has got update locally @@ -222,8 +230,7 @@ private void testQueryAgainstDownReplica() throws Exception { client.query(new SolrQuery("*:*")); count++; - SolrRequestHandler select = leaderCore.getRequestHandler(""); - long c = (long) select.getStatistics().get("requests"); + long c = cnt.getCount(); assertEquals("Query wasn't served by leader", count, c); } diff --git a/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java b/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java index 5f0d537a7e25..aa42664b4009 100644 --- a/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java +++ b/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java @@ -19,7 +19,7 @@ import java.util.Map; import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.response.SolrQueryResponse; import org.junit.BeforeClass; import org.junit.Test; @@ -88,11 +88,11 @@ public void testPrefixQuery() throws Exception { public void testCacheAssumptions() throws Exception { String fq= "name:d*"; SolrCore core = h.getCore(); - SolrInfoMBean filterCacheStats = core.getInfoRegistry().get("filterCache"); - long fqInserts = (long) filterCacheStats.getStatistics().get("inserts"); + MetricsMap filterCacheStats = (MetricsMap)core.getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.filterCache"); + long fqInserts = (long) filterCacheStats.getValue().get("inserts"); - SolrInfoMBean queryCacheStats = core.getInfoRegistry().get("queryResultCache"); - long qrInserts = (long) queryCacheStats.getStatistics().get("inserts"); + MetricsMap queryCacheStats = (MetricsMap)core.getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.queryResultCache"); + long qrInserts = (long) queryCacheStats.getValue().get("inserts"); // This gets 0 docs back. Use 10000 instead of 1 for timeAllowed and it gets 100 back and the for loop below // succeeds. @@ -105,16 +105,16 @@ public void testCacheAssumptions() throws Exception { assertTrue("Should have partial results", (Boolean) (header.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))); assertEquals("Should NOT have inserted partial results in the cache!", - (long) queryCacheStats.getStatistics().get("inserts"), qrInserts); + (long) queryCacheStats.getValue().get("inserts"), qrInserts); - assertEquals("Should NOT have another insert", fqInserts, (long) filterCacheStats.getStatistics().get("inserts")); + assertEquals("Should NOT have another insert", fqInserts, (long) filterCacheStats.getValue().get("inserts")); // At the end of all this, we should have no hits in the queryResultCache. response = JQ(req("q", "*:*", "fq", fq, "indent", "true", "timeAllowed", longTimeout)); // Check that we did insert this one. 
- assertEquals("Hits should still be 0", (long) filterCacheStats.getStatistics().get("hits"), 0L); - assertEquals("Inserts should be bumped", (long) filterCacheStats.getStatistics().get("inserts"), fqInserts + 1); + assertEquals("Hits should still be 0", (long) filterCacheStats.getValue().get("hits"), 0L); + assertEquals("Inserts should be bumped", (long) filterCacheStats.getValue().get("inserts"), fqInserts + 1); res = (Map) ObjectBuilder.fromJSON(response); body = (Map) (res.get("response")); @@ -130,14 +130,14 @@ public void testCacheAssumptions() throws Exception { public void testQueryResults() throws Exception { String q = "name:e*"; SolrCore core = h.getCore(); - SolrInfoMBean queryCacheStats = core.getInfoRegistry().get("queryResultCache"); - NamedList nl = queryCacheStats.getStatistics(); + MetricsMap queryCacheStats = (MetricsMap)core.getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.queryResultCache"); + Map nl = queryCacheStats.getValue(); long inserts = (long) nl.get("inserts"); String response = JQ(req("q", q, "indent", "true", "timeAllowed", "1", "sleep", sleep)); // The queryResultCache should NOT get an entry here. - nl = queryCacheStats.getStatistics(); + nl = queryCacheStats.getValue(); assertEquals("Should NOT have inserted partial results!", inserts, (long) nl.get("inserts")); Map res = (Map) ObjectBuilder.fromJSON(response); @@ -150,7 +150,7 @@ public void testQueryResults() throws Exception { response = JQ(req("q", q, "indent", "true", "timeAllowed", longTimeout)); // Check that we did insert this one. - NamedList nl2 = queryCacheStats.getStatistics(); + Map nl2 = queryCacheStats.getValue(); assertEquals("Hits should still be 0", (long) nl.get("hits"), (long) nl2.get("hits")); assertTrue("Inserts should be bumped", inserts < (long) nl2.get("inserts")); diff --git a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java index 75f6c9b6ba73..2a4dcc0513bd 100644 --- a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java +++ b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java @@ -20,9 +20,9 @@ import java.text.SimpleDateFormat; import java.util.Date; import java.util.HashMap; -import java.util.Iterator; import java.util.Locale; import java.util.Map; +import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -30,11 +30,14 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.NoLockFactory; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.cloud.hdfs.HdfsTestUtil; import org.apache.solr.common.util.NamedList; import org.apache.solr.core.DirectoryFactory.DirContext; import org.apache.solr.handler.SnapShooter; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.store.hdfs.HdfsLocalityReporter; import org.apache.solr.util.BadHdfsThreadsFilter; import org.apache.solr.util.MockCoreContainer.MockCoreDescriptor; @@ -175,24 +178,24 @@ public void testCleanupOldIndexDirectories() throws Exception { public void testLocalityReporter() throws Exception { Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster); conf.set("dfs.permissions.enabled", "false"); - + + Random r = random(); HdfsDirectoryFactory factory = new HdfsDirectoryFactory(); + SolrMetricManager metricManager = new 
SolrMetricManager(); + String registry = TestUtil.randomSimpleString(r, 2, 10); + String scope = TestUtil.randomSimpleString(r,2, 10); Map props = new HashMap(); props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr"); props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false"); props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false"); props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true"); factory.init(new NamedList<>(props)); - - Iterator it = factory.offerMBeans().iterator(); - it.next(); // skip - SolrInfoMBean localityBean = it.next(); // brittle, but it's ok - - // Make sure we have the right bean. - assertEquals("Got the wrong bean: " + localityBean.getName(), "hdfs-locality", localityBean.getName()); - + factory.initializeMetrics(metricManager, registry, scope); + + // get the metrics map for the locality bean + MetricsMap metrics = (MetricsMap)metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality"); // We haven't done anything, so there should be no data - NamedList statistics = localityBean.getStatistics(); + Map statistics = metrics.getValue(); assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0l, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL)); assertEquals( @@ -210,7 +213,7 @@ public void testLocalityReporter() throws Exception { // no locality because hostname not set factory.setHost("bogus"); - statistics = localityBean.getStatistics(); + statistics = metrics.getValue(); assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL)); assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL), @@ -221,7 +224,7 @@ public void testLocalityReporter() throws Exception { // set hostname and check again factory.setHost("127.0.0.1"); - statistics = localityBean.getStatistics(); + statistics = metrics.getValue(); assertEquals( "Did not count block as local after setting hostname: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL), diff --git a/solr/core/src/test/org/apache/solr/core/MockInfoBean.java b/solr/core/src/test/org/apache/solr/core/MockInfoBean.java new file mode 100644 index 000000000000..dfa94ae11213 --- /dev/null +++ b/solr/core/src/test/org/apache/solr/core/MockInfoBean.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.core; + +import java.util.HashSet; +import java.util.Set; + +import com.codahale.metrics.MetricRegistry; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricProducer; + +class MockInfoBean implements SolrInfoBean, SolrMetricProducer { + Set metricNames = new HashSet<>(); + MetricRegistry registry; + + @Override + public String getName() { + return "mock"; + } + + @Override + public Category getCategory() { + return Category.OTHER; + } + + @Override + public String getDescription() { + return "mock"; + } + + @Override + public Set getMetricNames() { + return metricNames; + } + + @Override + public MetricRegistry getMetricRegistry() { + return registry; + } + + @Override + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + registry = manager.registry(registryName); + MetricsMap metricsMap = new MetricsMap((detailed, map) -> { + map.put("Integer", 123); + map.put("Double",567.534); + map.put("Long", 32352463l); + map.put("Short", (short) 32768); + map.put("Byte", (byte) 254); + map.put("Float", 3.456f); + map.put("String","testing"); + map.put("Object", new Object()); + }); + manager.registerGauge(this, registryName, metricsMap, true, getClass().getSimpleName(), getCategory().toString(), scope); + } +} \ No newline at end of file diff --git a/solr/core/src/test/org/apache/solr/core/MockInfoMBean.java b/solr/core/src/test/org/apache/solr/core/MockInfoMBean.java deleted file mode 100644 index e0d566c47a07..000000000000 --- a/solr/core/src/test/org/apache/solr/core/MockInfoMBean.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.solr.core; - -import java.net.URL; - -import org.apache.solr.common.util.NamedList; - -class MockInfoMBean implements SolrInfoMBean { - @Override - public String getName() { - return "mock"; - } - - @Override - public Category getCategory() { - return Category.OTHER; - } - - @Override - public String getDescription() { - return "mock"; - } - - @Override - public URL[] getDocs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getVersion() { - return "mock"; - } - - @Override - public String getSource() { - return "mock"; - } - - @Override - @SuppressWarnings("unchecked") - public NamedList getStatistics() { - NamedList myList = new NamedList(); - myList.add("Integer", 123); - myList.add("Double",567.534); - myList.add("Long", 32352463l); - myList.add("Short", (short) 32768); - myList.add("Byte", (byte) 254); - myList.add("Float", 3.456f); - myList.add("String","testing"); - myList.add("Object", new Object()); - return myList; - } -} \ No newline at end of file diff --git a/solr/core/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java b/solr/core/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java index 367870a697fc..bcf6e9f91db4 100644 --- a/solr/core/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java +++ b/solr/core/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java @@ -17,6 +17,7 @@ package org.apache.solr.core; import org.apache.solr.handler.RequestHandlerBase; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.common.util.NamedList; @@ -40,6 +41,12 @@ public void init(NamedList args) { super.init(args); } + @Override + public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) { + super.initializeMetrics(manager, registryName, scope); + manager.registerGauge(this, registryName, () -> initCounter.intValue(), true, "initCount", getCategory().toString(), scope); + } + @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { this.req = req; @@ -51,12 +58,4 @@ public String getDescription() { String result = null; return result; } - - @Override - public NamedList getStatistics() { - NamedList lst = super.getStatistics(); - lst.add("initCount", initCounter.intValue()); - return lst; - } - } diff --git a/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java b/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java index 9a953e1cccab..3c13645702c7 100644 --- a/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java +++ b/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java @@ -16,9 +16,13 @@ */ package org.apache.solr.core; +import java.util.Map; + +import com.codahale.metrics.Gauge; import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.request.SolrRequestHandler; +import org.apache.solr.util.stats.MetricUtils; import org.junit.BeforeClass; import org.junit.Test; @@ -30,10 +34,11 @@ public static void beforeClass() throws Exception { @Test public void testInitCount() { - SolrCore core = h.getCore(); - SolrRequestHandler handler = core.getRequestHandler( "mock" ); + String registry = h.getCore().getCoreMetricManager().getRegistryName(); + SolrMetricManager manager = h.getCoreContainer().getMetricManager(); + Gauge g = 
(Gauge)manager.registry(registry).getMetrics().get("QUERY.mock.initCount"); assertEquals("Incorrect init count", - 1, handler.getStatistics().get("initCount")); + 1, g.getValue().intValue()); } @Test @@ -105,11 +110,11 @@ public void testStatistics() { "text", "line up and fly directly at the enemy death cannons, clogging them with wreckage!")); assertU(commit()); - NamedList updateStats = updateHandler.getStatistics(); - NamedList termStats = termHandler.getStatistics(); + Map updateStats = MetricUtils.convertMetrics(updateHandler.getMetricRegistry(), updateHandler.getMetricNames()); + Map termStats = MetricUtils.convertMetrics(termHandler.getMetricRegistry(), termHandler.getMetricNames()); - Double updateTime = (Double) updateStats.get("avgTimePerRequest"); - Double termTime = (Double) termStats.get("avgTimePerRequest"); + Long updateTime = (Long) updateStats.get("UPDATE./update.totalTime"); + Long termTime = (Long) termStats.get("QUERY./terms.totalTime"); assertFalse("RequestHandlers should not share statistics!", updateTime.equals(termTime)); } diff --git a/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java b/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java index 695e8693ec1a..c042bd66c4e8 100644 --- a/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java +++ b/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java @@ -245,10 +245,10 @@ public void testInfoRegistry() throws Exception { //TEst that SolrInfoMBeans are registered, including SearchComponents SolrCore core = h.getCore(); - Map infoRegistry = core.getInfoRegistry(); + Map infoRegistry = core.getInfoRegistry(); assertTrue("infoRegistry Size: " + infoRegistry.size() + " is not greater than: " + 0, infoRegistry.size() > 0); //try out some that we know are in the config - SolrInfoMBean bean = infoRegistry.get(SpellCheckComponent.COMPONENT_NAME); + SolrInfoBean bean = infoRegistry.get(SpellCheckComponent.COMPONENT_NAME); assertNotNull("bean not registered", bean); //try a default one bean = infoRegistry.get(QueryComponent.COMPONENT_NAME); diff --git a/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java b/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java index 7d2f174982f8..bdef535fc84d 100644 --- a/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java +++ b/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java @@ -215,11 +215,15 @@ public void testCompressionModeDefault() throws IOException { assertNull("Unexpected configuration of codec factory for this test. 
Expecting empty element", config.getNode("codecFactory", false).getFirstChild()); IndexSchema schema = IndexSchemaFactory.buildIndexSchema("schema_codec.xml", config); + + CoreContainer coreContainer = h.getCoreContainer(); try { - c = new SolrCore(new CoreDescriptor(h.getCoreContainer(), newCoreName, testSolrHome.resolve(newCoreName)), + CoreDescriptor cd = new CoreDescriptor(newCoreName, testSolrHome.resolve(newCoreName), + coreContainer.getContainerProperties(), coreContainer.isZooKeeperAware()); + c = new SolrCore(coreContainer, cd, new ConfigSet("fakeConfigset", config, schema, null, true)); - assertNull(h.getCoreContainer().registerCore(newCoreName, c, false, false)); + assertNull(coreContainer.registerCore(cd, c, false, false)); h.coreName = newCoreName; assertEquals("We are not using the correct core", "solrconfig_codec2.xml", h.getCore().getConfigResource()); assertU(add(doc("string_f", "foo"))); @@ -227,7 +231,7 @@ public void testCompressionModeDefault() throws IOException { assertCompressionMode(SchemaCodecFactory.SOLR_DEFAULT_COMPRESSION_MODE.name(), h.getCore()); } finally { h.coreName = previousCoreName; - h.getCoreContainer().unload(newCoreName); + coreContainer.unload(newCoreName); } } diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java index d23b8b147143..2949e2e5bc46 100644 --- a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java +++ b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java @@ -214,7 +214,9 @@ public void testDeleteBadCores() throws Exception { final CoreContainer cc = new CoreContainer(SolrXmlConfig.fromString(resourceLoader, CONFIGSETS_SOLR_XML), new Properties(), cl); Path corePath = resourceLoader.getInstancePath().resolve("badcore"); - CoreDescriptor badcore = new CoreDescriptor(cc, "badcore", corePath, "configSet", "nosuchconfigset"); + CoreDescriptor badcore = new CoreDescriptor("badcore", corePath, cc.getContainerProperties(), cc.isZooKeeperAware(), + "configSet", "nosuchconfigset"); + cl.add(badcore); try { @@ -383,6 +385,7 @@ public void swap(CoreContainer cc, CoreDescriptor cd1, CoreDescriptor cd2) { public List discover(CoreContainer cc) { return cores; } + } @Test @@ -397,7 +400,7 @@ public void testCoreInitFailuresFromEmptyContainer() throws Exception { CoreContainer cc = init(CONFIGSETS_SOLR_XML); // check that we have the cores we expect - cores = cc.getCoreNames(); + cores = cc.getLoadedCoreNames(); assertNotNull("core names is null", cores); assertEquals("wrong number of cores", 0, cores.size()); @@ -420,7 +423,7 @@ public void testCoreInitFailuresFromEmptyContainer() throws Exception { } // check that we have the cores we expect - cores = cc.getCoreNames(); + cores = cc.getLoadedCoreNames(); assertNotNull("core names is null", cores); assertEquals("wrong number of cores", 0, cores.size()); @@ -467,12 +470,14 @@ public void testCoreInitFailuresOnReload() throws Exception { System.setProperty("configsets", getFile("solr/configsets").getAbsolutePath()); final CoreContainer cc = new CoreContainer(SolrXmlConfig.fromString(resourceLoader, CONFIGSETS_SOLR_XML), new Properties(), cl); - cl.add(new CoreDescriptor(cc, "col_ok", resourceLoader.getInstancePath().resolve("col_ok"), "configSet", "minimal")); - cl.add(new CoreDescriptor(cc, "col_bad", resourceLoader.getInstancePath().resolve("col_bad"), "configSet", "bad-mergepolicy")); + cl.add(new CoreDescriptor("col_ok", resourceLoader.getInstancePath().resolve("col_ok"), + 
cc.getContainerProperties(), cc.isZooKeeperAware(), "configSet", "minimal")); + cl.add(new CoreDescriptor("col_bad", resourceLoader.getInstancePath().resolve("col_bad"), + cc.getContainerProperties(), cc.isZooKeeperAware(), "configSet", "bad-mergepolicy")); cc.load(); // check that we have the cores we expect - cores = cc.getCoreNames(); + cores = cc.getLoadedCoreNames(); assertNotNull("core names is null", cores); assertEquals("wrong number of cores", 1, cores.size()); assertTrue("col_ok not found", cores.contains("col_ok")); @@ -509,7 +514,7 @@ public void testCoreInitFailuresOnReload() throws Exception { cc.create("col_bad", ImmutableMap.of()); // check that we have the cores we expect - cores = cc.getCoreNames(); + cores = cc.getLoadedCoreNames(); assertNotNull("core names is null", cores); assertEquals("wrong number of cores", 2, cores.size()); assertTrue("col_ok not found", cores.contains("col_ok")); @@ -534,7 +539,7 @@ public void testCoreInitFailuresOnReload() throws Exception { } // check that we have the cores we expect - cores = cc.getCoreNames(); + cores = cc.getLoadedCoreNames(); assertNotNull("core names is null", cores); assertEquals("wrong number of cores", 2, cores.size()); assertTrue("col_ok not found", cores.contains("col_ok")); @@ -591,7 +596,7 @@ public void testCoreInitFailuresOnReload() throws Exception { col_bad_old_start, getCoreStartTime(cc, "col_bad")); // check that we have the cores we expect - cores = cc.getCoreNames(); + cores = cc.getLoadedCoreNames(); assertNotNull("core names is null", cores); assertEquals("wrong number of cores", 2, cores.size()); assertTrue("col_ok not found", cores.contains("col_ok")); @@ -619,7 +624,7 @@ public void testCoreInitFailuresOnReload() throws Exception { // check that we have the cores we expect - cores = cc.getCoreNames(); + cores = cc.getLoadedCoreNames(); assertNotNull("core names is null", cores); assertEquals("wrong number of cores", 2, cores.size()); assertTrue("col_ok not found", cores.contains("col_ok")); diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java b/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java index 22020baf352c..0c05d83aa908 100644 --- a/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java +++ b/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java @@ -200,7 +200,7 @@ public void testTooManyTransientCores() throws Exception { cc.load(); // Just check that the proper number of cores are loaded since making the test depend on order would be fragile assertEquals("There should only be 3 cores loaded, coreLOS and two coreT? 
cores", - 3, cc.getCoreNames().size()); + 3, cc.getLoadedCoreNames().size()); SolrCore c1 = cc.getCore("coreT1"); assertNotNull("Core T1 should NOT BE NULL", c1); diff --git a/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java index f841f92781f1..db941f7efb3e 100644 --- a/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java +++ b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java @@ -16,7 +16,10 @@ */ package org.apache.solr.core; -import org.apache.solr.core.JmxMonitoredMap.SolrDynamicMBean; +import org.apache.solr.metrics.SolrMetricManager; +import org.apache.solr.metrics.SolrMetricReporter; +import org.apache.solr.metrics.reporters.JmxObjectNameFactory; +import org.apache.solr.metrics.reporters.SolrJmxReporter; import org.apache.solr.util.AbstractSolrTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -29,12 +32,10 @@ import javax.management.MBeanAttributeInfo; import javax.management.MBeanInfo; import javax.management.MBeanServer; -import javax.management.MalformedObjectNameException; import javax.management.ObjectInstance; import javax.management.ObjectName; import java.lang.invoke.MethodHandles; import java.lang.management.ManagementFactory; -import java.util.Hashtable; import java.util.Map; import java.util.Set; @@ -49,6 +50,8 @@ public class TestJmxIntegration extends AbstractSolrTestCase { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private static MBeanServer mbeanServer = null; + private static JmxObjectNameFactory nameFactory = null; + private static String registryName = null; @BeforeClass public static void beforeClass() throws Exception { @@ -61,25 +64,30 @@ public static void beforeClass() throws Exception { initCore("solrconfig.xml", "schema.xml"); - // we should be able to se that the core has JmxIntegration enabled - assertTrue("JMX not enabled", - h.getCore().getSolrConfig().jmxConfig.enabled); - // and we should be able to see that the the monitor map found - // a JMX server to use, which refers to the server we started - - Map registry = h.getCore().getInfoRegistry(); - assertTrue("info registry is not a JMX monitored map", - registry instanceof JmxMonitoredMap); - mbeanServer = ((JmxMonitoredMap)registry).getServer(); - - assertNotNull("No JMX server found by monitor map", - mbeanServer); - - // NOTE: we can't garuntee that "mbeanServer == platformServer" - // the JVM may have mutiple MBean servers funning when the test started - // and the contract of not specifying one when configuring solr with - // is that it will use whatever the "first" MBean server + // we should be able to see that the core has JmxIntegration enabled + registryName = h.getCore().getCoreMetricManager().getRegistryName(); + SolrMetricManager manager = h.getCoreContainer().getMetricManager(); + Map reporters = manager.getReporters(registryName); + assertEquals(1, reporters.size()); + SolrMetricReporter reporter = reporters.values().iterator().next(); + assertTrue(reporter instanceof SolrJmxReporter); + SolrJmxReporter jmx = (SolrJmxReporter)reporter; + assertTrue("JMX not enabled", jmx.isActive()); + // and we should be able to see that the reporter + // refers to the JMX server we started + + mbeanServer = jmx.getMBeanServer(); + + assertNotNull("No JMX server found in the reporter", + mbeanServer); + + // NOTE: we can't guarantee that "mbeanServer == platformServer" + // the JVM may have multiple MBean servers running 
when the test started + // and the contract of not specifying one when configuring solr.xml without + // agetnId or serviceUrl is that it will use whatever the "first" MBean server // returned by the JVM is. + + nameFactory = new JmxObjectNameFactory("default", registryName); } @AfterClass @@ -93,34 +101,38 @@ public void testJmxRegistration() throws Exception { Set objects = mbeanServer.queryMBeans(null, null); assertFalse("No objects found in mbean server", objects - .isEmpty()); + .isEmpty()); int numDynamicMbeans = 0; for (ObjectInstance o : objects) { - assertNotNull("Null name on: " + o.toString(), o.getObjectName()); - MBeanInfo mbeanInfo = mbeanServer.getMBeanInfo(o.getObjectName()); - if (mbeanInfo.getClassName().endsWith(SolrDynamicMBean.class.getName())) { + ObjectName name = o.getObjectName(); + assertNotNull("Null name on: " + o.toString(), name); + MBeanInfo mbeanInfo = mbeanServer.getMBeanInfo(name); + if (name.getDomain().equals("solr")) { numDynamicMbeans++; MBeanAttributeInfo[] attrs = mbeanInfo.getAttributes(); - assertTrue("No Attributes found for mbean: " + mbeanInfo, - 0 < attrs.length); + if (name.getKeyProperty("name").equals("fetcher")) { // no attributes without active replication + continue; + } + assertTrue("No Attributes found for mbean: " + o.getObjectName() + ", " + mbeanInfo, + 0 < attrs.length); for (MBeanAttributeInfo attr : attrs) { // ensure every advertised attribute is gettable try { Object trash = mbeanServer.getAttribute(o.getObjectName(), attr.getName()); } catch (javax.management.AttributeNotFoundException e) { throw new RuntimeException("Unable to featch attribute for " + o.getObjectName() - + ": " + attr.getName(), e); + + ": " + attr.getName(), e); } } } } - assertTrue("No SolrDynamicMBeans found", 0 < numDynamicMbeans); + assertTrue("No MBeans found", 0 < numDynamicMbeans); } @Test public void testJmxUpdate() throws Exception { - SolrInfoMBean bean = null; + SolrInfoBean bean = null; // wait until searcher is registered for (int i=0; i<100; i++) { bean = h.getCore().getInfoRegistry().get("searcher"); @@ -128,18 +140,20 @@ public void testJmxUpdate() throws Exception { Thread.sleep(250); } if (bean==null) throw new RuntimeException("searcher was never registered"); - ObjectName searcher = getObjectName("searcher", bean); + ObjectName searcher = nameFactory.createName("gauge", registryName, "SEARCHER.searcher.*"); log.info("Mbeans in server: " + mbeanServer.queryNames(null, null)); + Set objects = mbeanServer.queryMBeans(searcher, null); assertFalse("No mbean found for SolrIndexSearcher", mbeanServer.queryMBeans(searcher, null).isEmpty()); - int oldNumDocs = (Integer)mbeanServer.getAttribute(searcher, "numDocs"); + ObjectName name = nameFactory.createName("gauge", registryName, "SEARCHER.searcher.numDocs"); + int oldNumDocs = (Integer)mbeanServer.getAttribute(name, "Value"); assertU(adoc("id", "1")); assertU("commit", commit()); - int numDocs = (Integer)mbeanServer.getAttribute(searcher, "numDocs"); + int numDocs = (Integer)mbeanServer.getAttribute(name, "Value"); assertTrue("New numDocs is same as old numDocs as reported by JMX", - numDocs > oldNumDocs); + numDocs > oldNumDocs); } @Test @Ignore("timing problem? 
https://issues.apache.org/jira/browse/SOLR-2715") @@ -183,14 +197,4 @@ public void testJmxOnCoreReload() throws Exception { log.info("After Reload: Size of infoRegistry: " + registrySize + " MBeans: " + newNumberOfObjects); assertEquals("Number of registered MBeans is not the same as info registry size", registrySize, newNumberOfObjects); } - - private ObjectName getObjectName(String key, SolrInfoMBean infoBean) - throws MalformedObjectNameException { - Hashtable map = new Hashtable<>(); - map.put("type", key); - map.put("id", infoBean.getName()); - String coreName = h.getCore().getName(); - return ObjectName.getInstance(("solr" + (null != coreName ? "/" + coreName : "")), map); - } -} - +} \ No newline at end of file diff --git a/solr/core/src/test/org/apache/solr/core/TestJmxMonitoredMap.java b/solr/core/src/test/org/apache/solr/core/TestJmxMonitoredMap.java deleted file mode 100644 index aa107bce0bf4..000000000000 --- a/solr/core/src/test/org/apache/solr/core/TestJmxMonitoredMap.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.solr.core; - -import org.apache.lucene.util.LuceneTestCase; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.core.SolrConfig.JmxConfiguration; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.MBeanServerConnection; -import javax.management.ObjectInstance; -import javax.management.ObjectName; -import javax.management.Query; -import javax.management.remote.JMXConnector; -import javax.management.remote.JMXConnectorFactory; -import javax.management.remote.JMXServiceURL; -import java.io.IOException; -import java.lang.invoke.MethodHandles; -import java.net.ServerSocket; -import java.rmi.registry.LocateRegistry; -import java.rmi.server.RMIServerSocketFactory; -import java.util.Set; - -import static org.hamcrest.CoreMatchers.allOf; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; - -/** - * Test for JmxMonitoredMap - * - * - * @since solr 1.3 - */ -public class TestJmxMonitoredMap extends LuceneTestCase { - - private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - - private int port = 0; - - private JMXConnector connector; - - private MBeanServerConnection mbeanServer; - - private JmxMonitoredMap monitoredMap; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - String oldHost = System.getProperty("java.rmi.server.hostname"); - try { - // this stupid sysprop thing is needed, because remote stubs use an - // arbitrary local ip to connect - // See: http://weblogs.java.net/blog/emcmanus/archive/2006/12/multihomed_comp.html - System.setProperty("java.rmi.server.hostname", "127.0.0.1"); - class LocalhostRMIServerSocketFactory implements RMIServerSocketFactory { - ServerSocket socket; - - @Override - public ServerSocket createServerSocket(int port) throws IOException { - return socket = new ServerSocket(port); - } - }; - LocalhostRMIServerSocketFactory factory = new LocalhostRMIServerSocketFactory(); - LocateRegistry.createRegistry(0, null, factory); - port = factory.socket.getLocalPort(); - log.info("Using port: " + port); - String url = "service:jmx:rmi:///jndi/rmi://127.0.0.1:"+port+"/solrjmx"; - JmxConfiguration config = new JmxConfiguration(true, null, url, null); - monitoredMap = new JmxMonitoredMap<>("", "", "", config); - JMXServiceURL u = new JMXServiceURL(url); - connector = JMXConnectorFactory.connect(u); - mbeanServer = connector.getMBeanServerConnection(); - } finally { - if (oldHost == null) { - System.clearProperty("java.rmi.server.hostname"); - } else { - System.setProperty("java.rmi.server.hostname", oldHost); - } - } - } - - @Override - @After - public void tearDown() throws Exception { - try { - connector.close(); - } catch (Exception e) { - } - super.tearDown(); - } - - @Test - public void testTypeName() throws Exception{ - MockInfoMBean mock = new MockInfoMBean(); - monitoredMap.put("mock", mock); - - NamedList dynamicStats = mock.getStatistics(); - assertTrue(dynamicStats.size() != 0); - assertTrue(dynamicStats.get("Integer") instanceof Integer); - assertTrue(dynamicStats.get("Double") instanceof Double); - assertTrue(dynamicStats.get("Long") instanceof Long); - assertTrue(dynamicStats.get("Short") instanceof Short); - assertTrue(dynamicStats.get("Byte") instanceof Byte); - assertTrue(dynamicStats.get("Float") instanceof Float); - assertTrue(dynamicStats.get("String") instanceof String); - - Set 
objects = mbeanServer.queryMBeans(null, Query.match( - Query.attr("name"), Query.value("mock"))); - - ObjectName name = objects.iterator().next().getObjectName(); - assertMBeanTypeAndValue(name, "Integer", Integer.class, 123); - assertMBeanTypeAndValue(name, "Double", Double.class, 567.534); - assertMBeanTypeAndValue(name, "Long", Long.class, 32352463l); - assertMBeanTypeAndValue(name, "Short", Short.class, (short) 32768); - assertMBeanTypeAndValue(name, "Byte", Byte.class, (byte) 254); - assertMBeanTypeAndValue(name, "Float", Float.class, 3.456f); - assertMBeanTypeAndValue(name, "String",String.class, "testing"); - - } - - @SuppressWarnings("unchecked") - public void assertMBeanTypeAndValue(ObjectName name, String attr, Class type, Object value) throws Exception { - assertThat(mbeanServer.getAttribute(name, attr), - allOf(instanceOf(type), equalTo(value)) - ); - } - - @Test - public void testPutRemoveClear() throws Exception { - MockInfoMBean mock = new MockInfoMBean(); - monitoredMap.put("mock", mock); - - - Set objects = mbeanServer.queryMBeans(null, Query.match( - Query.attr("name"), Query.value("mock"))); - assertFalse("No MBean for mock object found in MBeanServer", objects - .isEmpty()); - - monitoredMap.remove("mock"); - objects = mbeanServer.queryMBeans(null, Query.match(Query.attr("name"), - Query.value("mock"))); - assertTrue("MBean for mock object found in MBeanServer even after removal", - objects.isEmpty()); - - monitoredMap.put("mock", mock); - monitoredMap.put("mock2", mock); - objects = mbeanServer.queryMBeans(null, Query.match(Query.attr("name"), - Query.value("mock"))); - assertFalse("No MBean for mock object found in MBeanServer", objects - .isEmpty()); - - monitoredMap.clear(); - objects = mbeanServer.queryMBeans(null, Query.match(Query.attr("name"), - Query.value("mock"))); - assertTrue( - "MBean for mock object found in MBeanServer even after clear has been called", - objects.isEmpty()); - - } - - @Test - public void testJmxAugmentedSolrInfoMBean() throws Exception { - final MockInfoMBean mock = new MockInfoMBean(); - final String jmxKey = "jmx"; - final String jmxValue = "jmxValue"; - - MockJmxAugmentedSolrInfoMBean mbean = new MockJmxAugmentedSolrInfoMBean(mock) { - @Override - public NamedList getStatisticsForJmx() { - NamedList stats = getStatistics(); - stats.add(jmxKey, jmxValue); - return stats; - } - }; - monitoredMap.put("mock", mbean); - - // assert getStatistics called when used as a map. Note can't use equals here to compare - // because getStatistics returns a new Object each time. 
- assertNull(monitoredMap.get("mock").getStatistics().get(jmxKey)); - - // assert getStatisticsForJmx called when used as jmx server - Set objects = mbeanServer.queryMBeans(null, Query.match( - Query.attr("name"), Query.value("mock"))); - ObjectName name = objects.iterator().next().getObjectName(); - assertMBeanTypeAndValue(name, jmxKey, jmxValue.getClass(), jmxValue); - } - - private static abstract class MockJmxAugmentedSolrInfoMBean - extends SolrInfoMBeanWrapper implements JmxMonitoredMap.JmxAugmentedSolrInfoMBean { - - public MockJmxAugmentedSolrInfoMBean(SolrInfoMBean mbean) { - super(mbean); - } - - @Override - public abstract NamedList getStatisticsForJmx(); - } -} diff --git a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java index 8690e27bdec5..7c41470ece9c 100644 --- a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java +++ b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java @@ -58,7 +58,7 @@ public static void setupClass() throws Exception { } private static CoreDescriptor makeCoreDescriptor(CoreContainer cc, String coreName, String isTransient, String loadOnStartup) { - return new CoreDescriptor(cc, coreName, cc.getCoreRootDirectory().resolve(coreName), + return new CoreDescriptor(coreName, cc.getCoreRootDirectory().resolve(coreName), cc.getContainerProperties(), false, CoreDescriptor.CORE_TRANSIENT, isTransient, CoreDescriptor.CORE_LOADONSTARTUP, loadOnStartup); } @@ -372,8 +372,7 @@ private void unloadViaAdmin(CoreContainer cc, String name) throws Exception { resp); } - - + // Make sure that creating a transient core from the admin handler correctly respects the transient limits etc. @Test public void testCreateTransientFromAdmin() throws Exception { @@ -496,7 +495,13 @@ public void testBadConfigsGenerateErrors() throws Exception { copyGoodConf("badSchema2", "schema-tiny.xml", "schema.xml"); - // This should force a reload of the cores. + // Reload the cores and insure that + // 1> they pick up the new configs + // 2> they don't fail again b/c they still have entries in loadFailure in core container. + cc.reload("badConfig1"); + cc.reload("badConfig2"); + cc.reload("badSchema1"); + cc.reload("badSchema2"); SolrCore bc1 = cc.getCore("badConfig1");; SolrCore bc2 = cc.getCore("badConfig2"); SolrCore bs1 = cc.getCore("badSchema1"); @@ -640,7 +645,7 @@ private void checkStatus(CoreContainer cc, Boolean ok, String core) throws Excep } public static void checkNotInCores(CoreContainer cc, String... nameCheck) { - Collection loadedNames = cc.getCoreNames(); + Collection loadedNames = cc.getLoadedCoreNames(); for (String name : nameCheck) { assertFalse("core " + name + " was found in the list of cores", loadedNames.contains(name)); } @@ -673,7 +678,7 @@ public static void checkNotInCores(CoreContainer cc, String... nameCheck) { } public static void checkInCores(CoreContainer cc, String... nameCheck) { - Collection loadedNames = cc.getCoreNames(); + Collection loadedNames = cc.getLoadedCoreNames(); assertEquals("There whould be exactly as many loaded cores as loaded names returned. 
", loadedNames.size(), nameCheck.length); diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrDynamicMBean.java b/solr/core/src/test/org/apache/solr/core/TestSolrDynamicMBean.java deleted file mode 100644 index eae4e799e894..000000000000 --- a/solr/core/src/test/org/apache/solr/core/TestSolrDynamicMBean.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.solr.core; - -import org.apache.lucene.util.LuceneTestCase; -import org.apache.solr.core.JmxMonitoredMap.SolrDynamicMBean; -import org.junit.Before; -import org.junit.Test; - -/** - * Test for JmxMonitoredMap - * - * - * @since solr 1.3 - */ -public class TestSolrDynamicMBean extends LuceneTestCase { - - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - } - - - @Test - public void testCachedStatsOption() throws Exception{ - // SOLR-6747 Add an optional caching option as a workaround for SOLR-6586. - - SolrInfoMBean solrmbeaninfo = new MockInfoMBean(); - SolrDynamicMBean sdmbean = new SolrDynamicMBean("", solrmbeaninfo); - - sdmbean.getMBeanInfo(); - - Object object1 = sdmbean.getAttribute("Object"); - Object object2 = sdmbean.getAttribute("Object"); - - assertNotSame(object1, object2); - - sdmbean.getMBeanInfo(); - - Object object12 = sdmbean.getAttribute("Object"); - Object object22 = sdmbean.getAttribute("Object"); - - assertNotSame(object1, object12); - assertNotSame(object2, object22); - - - // test cached stats - - solrmbeaninfo = new MockInfoMBean(); - sdmbean = new SolrDynamicMBean("", solrmbeaninfo, true); - - sdmbean.getMBeanInfo(); - - object1 = sdmbean.getAttribute("Object"); - object2 = sdmbean.getAttribute("Object"); - - assertEquals(object1, object2); - - sdmbean.getMBeanInfo(); - - object12 = sdmbean.getAttribute("Object"); - object22 = sdmbean.getAttribute("Object"); - - assertNotSame(object1, object12); - assertNotSame(object2, object22); - - assertEquals(object12, object22); - - } - -} diff --git a/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java index 84e2382d00c1..c7622f640ccb 100644 --- a/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java +++ b/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java @@ -31,7 +31,7 @@ import org.junit.Test; public class MBeansHandlerTest extends SolrTestCaseJ4 { - + @BeforeClass public static void beforeClass() throws Exception { initCore("solrconfig.xml", "schema.xml"); @@ -43,26 +43,26 @@ public void testDiff() throws Exception { CommonParams.QT,"/admin/mbeans", "stats","true", CommonParams.WT,"xml" - )); + )); List streams = new ArrayList<>(); streams.add(new ContentStreamBase.StringStream(xml)); - + 
LocalSolrQueryRequest req = lrf.makeRequest( CommonParams.QT,"/admin/mbeans", "stats","true", CommonParams.WT,"xml", "diff","true"); req.setContentStreams(streams); - + xml = h.query(req); NamedList>> diff = SolrInfoMBeanHandler.fromXML(xml); // The stats bean for SolrInfoMBeanHandler NamedList stats = (NamedList)diff.get("ADMIN").get("/admin/mbeans").get("stats"); - + //System.out.println("stats:"+stats); Pattern p = Pattern.compile("Was: (?[0-9]+), Now: (?[0-9]+), Delta: (?[0-9]+)"); - String response = stats.get("requests").toString(); + String response = stats.get("ADMIN./admin/mbeans.requests").toString(); Matcher m = p.matcher(response); if (!m.matches()) { fail("Response did not match pattern: " + response); @@ -96,4 +96,4 @@ public void testXMLDiffWithExternalEntity() throws Exception { assertTrue("external entity ignored properly", true); } -} +} \ No newline at end of file diff --git a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java index 2f849977e9fa..402cc250565e 100644 --- a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java +++ b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java @@ -33,7 +33,11 @@ public class MetricsHandlerTest extends SolrTestCaseJ4 { @BeforeClass public static void beforeClass() throws Exception { + initCore("solrconfig.xml", "schema.xml"); + // manually register some metrics in solr.jvm and solr.jetty - TestHarness doesn't init them + h.getCoreContainer().getMetricManager().counter(null, "solr.jvm", "foo"); + h.getCoreContainer().getMetricManager().counter(null, "solr.jetty", "foo"); } @Test @@ -41,13 +45,10 @@ public void test() throws Exception { MetricsHandler handler = new MetricsHandler(h.getCoreContainer()); SolrQueryResponse resp = new SolrQueryResponse(); - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json"), resp); NamedList values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); - assertNotNull(values.get("solr.jetty")); - assertNotNull(values.get("solr.jvm")); - assertNotNull(values.get("solr.node")); NamedList nl = (NamedList) values.get("solr.core.collection1"); assertNotNull(nl); Object o = nl.get("SEARCHER.new.errors"); @@ -63,7 +64,7 @@ public void test() throws Exception { assertEquals(5, ((Map) nl.get("ADMIN./admin/authorization.clientErrors")).size()); resp = new SolrQueryResponse(); - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "jvm,jetty"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "group", "jvm,jetty"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); @@ -73,7 +74,7 @@ public void test() throws Exception { resp = new SolrQueryResponse(); // "collection" works too, because it's a prefix for "collection1" - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "registry", "solr.core.collection,solr.jvm"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "registry", "solr.core.collection,solr.jvm"), resp); values = resp.getValues(); 
assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); @@ -83,7 +84,7 @@ public void test() throws Exception { resp = new SolrQueryResponse(); // "collection" works too, because it's a prefix for "collection1" - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "registry", "solr.core.collection", "registry", "solr.jvm"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "registry", "solr.core.collection", "registry", "solr.jvm"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); @@ -92,7 +93,7 @@ public void test() throws Exception { assertNotNull(values.get("solr.jvm")); resp = new SolrQueryResponse(); - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "jvm,jetty"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "group", "jvm,jetty"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); @@ -101,7 +102,7 @@ public void test() throws Exception { assertNotNull(values.get("solr.jvm")); resp = new SolrQueryResponse(); - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "jvm", "group", "jetty"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "group", "jvm", "group", "jetty"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); @@ -110,7 +111,7 @@ public void test() throws Exception { assertNotNull(values.get("solr.jvm")); resp = new SolrQueryResponse(); - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "node", "type", "counter"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "group", "node", "type", "counter"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); @@ -120,14 +121,11 @@ public void test() throws Exception { assertNull(values.get("ADMIN./admin/authorization.errors")); // this is a timer node resp = new SolrQueryResponse(); - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "prefix", "CONTAINER.cores,CONTAINER.threadPool"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "prefix", "CONTAINER.cores,CONTAINER.threadPool"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); - assertEquals(4, values.size()); - assertEquals(0, ((NamedList)values.get("solr.jvm")).size()); - assertEquals(0, ((NamedList)values.get("solr.jetty")).size()); - assertEquals(0, ((NamedList)values.get("solr.core.collection1")).size()); + assertEquals(1, values.size()); assertEquals(11, ((NamedList)values.get("solr.node")).size()); assertNotNull(values.get("solr.node")); values = (NamedList) values.get("solr.node"); @@ -136,21 +134,41 @@ public void test() throws Exception { assertNotNull(values.get("CONTAINER.threadPool.coreLoadExecutor.completed")); resp = new SolrQueryResponse(); - 
handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "jvm", "prefix", "CONTAINER.cores"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "prefix", "CONTAINER.cores", "regex", "C.*thread.*completed"), resp); + values = resp.getValues(); + assertNotNull(values.get("metrics")); + values = (NamedList) values.get("metrics"); + assertNotNull(values.get("solr.node")); + values = (NamedList) values.get("solr.node"); + assertEquals(5, values.size()); + assertNotNull(values.get("CONTAINER.threadPool.coreContainerWorkExecutor.completed")); + assertNotNull(values.get("CONTAINER.threadPool.coreLoadExecutor.completed")); + + resp = new SolrQueryResponse(); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "prefix", "CACHE.core.fieldCache", "property", "entries_count", MetricsHandler.COMPACT_PARAM, "true"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); values = (NamedList) values.get("metrics"); + assertNotNull(values.get("solr.core.collection1")); + values = (NamedList) values.get("solr.core.collection1"); assertEquals(1, values.size()); - assertEquals(0, ((NamedList)values.get("solr.jvm")).size()); - assertNull(values.get("solr.node")); + Map m = (Map)values.get("CACHE.core.fieldCache"); + assertNotNull(m); + assertNotNull(m.get("entries_count")); + + resp = new SolrQueryResponse(); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "group", "jvm", "prefix", "CONTAINER.cores"), resp); + values = resp.getValues(); + assertNotNull(values.get("metrics")); + values = (NamedList) values.get("metrics"); + assertEquals(0, values.size()); resp = new SolrQueryResponse(); - handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "node", "type", "timer", "prefix", "CONTAINER.cores"), resp); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", MetricsHandler.COMPACT_PARAM, "false", CommonParams.WT, "json", "group", "node", "type", "timer", "prefix", "CONTAINER.cores"), resp); values = resp.getValues(); assertNotNull(values.get("metrics")); SimpleOrderedMap map = (SimpleOrderedMap) values.get("metrics"); - assertEquals(1, map.size()); - assertEquals(0, ((NamedList)map.get("solr.node")).size()); + assertEquals(0, map.size()); } @Test @@ -168,4 +186,41 @@ public void testCompact() throws Exception { assertNotNull(o); // counter type assertTrue(o instanceof Number); } + + @Test + public void testPropertyFilter() throws Exception { + MetricsHandler handler = new MetricsHandler(h.getCoreContainer()); + + SolrQueryResponse resp = new SolrQueryResponse(); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", + MetricsHandler.COMPACT_PARAM, "true", "group", "core", "prefix", "CACHE.searcher"), resp); + NamedList values = resp.getValues(); + assertNotNull(values.get("metrics")); + values = (NamedList) values.get("metrics"); + NamedList nl = (NamedList) values.get("solr.core.collection1"); + assertNotNull(nl); + assertTrue(nl.size() > 0); + nl.forEach((k, v) -> { + assertTrue(v instanceof Map); + Map map = (Map)v; + assertTrue(map.size() > 2); + }); + + resp = new SolrQueryResponse(); + handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", + MetricsHandler.COMPACT_PARAM, "true", "group", "core", "prefix", 
"CACHE.searcher", + "property", "inserts", "property", "size"), resp); + values = resp.getValues(); + values = (NamedList) values.get("metrics"); + nl = (NamedList) values.get("solr.core.collection1"); + assertNotNull(nl); + assertTrue(nl.size() > 0); + nl.forEach((k, v) -> { + assertTrue(v instanceof Map); + Map map = (Map)v; + assertEquals(2, map.size()); + assertNotNull(map.get("inserts")); + assertNotNull(map.get("size")); + }); + } } diff --git a/solr/core/src/test/org/apache/solr/handler/admin/StatsReloadRaceTest.java b/solr/core/src/test/org/apache/solr/handler/admin/StatsReloadRaceTest.java index 7bf493923e5b..fc1b496c9f5e 100644 --- a/solr/core/src/test/org/apache/solr/handler/admin/StatsReloadRaceTest.java +++ b/solr/core/src/test/org/apache/solr/handler/admin/StatsReloadRaceTest.java @@ -17,7 +17,6 @@ package org.apache.solr.handler.admin; import java.util.List; -import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; @@ -68,13 +67,14 @@ public void testParallelReloadAndStats() throws Exception { boolean isCompleted; do { if (random.nextBoolean()) { - requestMbeans(); + requestMetrics(true); } else { requestCoreStatus(); } isCompleted = checkReloadComlpetion(asyncId); } while (!isCompleted); + requestMetrics(false); } } @@ -106,22 +106,41 @@ private boolean checkReloadComlpetion(int asyncId) { return isCompleted; } - private void requestMbeans() throws Exception { - String stats = h.query(req( - CommonParams.QT, "/admin/mbeans", - "stats", "true")); - - NamedList> actualStats = SolrInfoMBeanHandler.fromXML(stats).get("CORE"); - - for (Map.Entry> tuple : actualStats) { - if (tuple.getKey().contains("earcher")) { // catches "searcher" and "Searcher@345345 blah" - NamedList searcherStats = tuple.getValue(); - @SuppressWarnings("unchecked") - NamedList statsList = (NamedList)searcherStats.get("stats"); - assertEquals("expect to have exactly one indexVersion at "+statsList, 1, statsList.getAll("indexVersion").size()); - assertTrue(statsList.get("indexVersion") instanceof Long); + private void requestMetrics(boolean softFail) throws Exception { + SolrQueryResponse rsp = new SolrQueryResponse(); + String registry = "solr.core." 
+ h.coreName; + String key = "SEARCHER.searcher.indexVersion"; + boolean found = false; + int count = 10; + while (!found && count-- > 0) { + h.getCoreContainer().getRequestHandler("/admin/metrics").handleRequest( + req("prefix", "SEARCHER", "registry", registry, "compact", "true"), rsp); + + NamedList values = rsp.getValues(); + // this is not guaranteed to exist right away after core reload - there's a + // small window between core load and before searcher metrics are registered + // so we may have to check a few times, and then fail softly if reload is not complete yet + NamedList metrics = (NamedList)values.get("metrics"); + if (metrics == null) { + if (softFail) { + return; + } else { + fail("missing 'metrics' element in handler's output: " + values.asMap(5).toString()); + } + } + metrics = (NamedList)metrics.get(registry); + if (metrics.get(key) != null) { + found = true; + assertTrue(metrics.get(key) instanceof Long); + break; + } else { + Thread.sleep(500); } } + if (softFail && !found) { + return; + } + assertTrue("Key " + key + " not found in registry " + registry, found); } } diff --git a/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java index c961a55c5f5e..2e20dc8bdbaa 100644 --- a/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java +++ b/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java @@ -20,8 +20,10 @@ import java.lang.management.OperatingSystemMXBean; import java.util.Arrays; +import com.codahale.metrics.Gauge; import org.apache.lucene.util.LuceneTestCase; import org.apache.solr.common.util.SimpleOrderedMap; +import org.apache.solr.util.stats.MetricUtils; public class SystemInfoHandlerTest extends LuceneTestCase { @@ -36,9 +38,11 @@ public void testMagickGetter() throws Exception { info.add( "version", os.getVersion() ); info.add( "arch", os.getArch() ); - // make another using addMXBeanProperties() + // make another using MetricUtils.addMXBeanMetrics() SimpleOrderedMap info2 = new SimpleOrderedMap<>(); - SystemInfoHandler.addMXBeanProperties( os, OperatingSystemMXBean.class, info2 ); + MetricUtils.addMXBeanMetrics( os, OperatingSystemMXBean.class, null, (k, v) -> { + info2.add(k, ((Gauge)v).getValue()); + } ); // make sure they got the same thing for (String p : Arrays.asList("name", "version", "arch")) { diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java index 7ce171c63a6c..b7ad62c28e49 100644 --- a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java +++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentCustomSortTest.java @@ -44,11 +44,61 @@ public static void setUpBeforeClass() throws Exception { initCore("solrconfig.xml", "schema-custom-field.xml"); } + + private class TestParams { + + public final String sort; + public final String groupSort; + public final int groupLimit; + public final int groupOffset; + public final int start; + + public TestParams(String mySort, String myGroupSort, int myGroupLimit, int myGroupOffset, int myStart) { + sort = mySort; + groupSort = myGroupSort; + groupLimit = myGroupLimit; + groupOffset = myGroupOffset; + start = myStart; + } + } + + private class TestOutcome { + + public Integer[][] expectedIds; + + public TestOutcome( Integer[][] myExpectedIds ) 
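The requestMetrics helper added to StatsReloadRaceTest above has to poll because searcher metrics are registered asynchronously after a core reload. Stripped of the Solr test harness, that wait-until-registered loop reduces to the following sketch (the method and parameter names here are illustrative and not taken from the patch):

    // Poll a probe until it yields a value or the attempts are exhausted.
    // Returns null on timeout so the caller can decide between a soft and a hard failure,
    // mirroring the softFail flag used in requestMetrics above.
    static <T> T pollUntilPresent(java.util.function.Supplier<T> probe, int attempts, long sleepMillis)
        throws InterruptedException {
      for (int i = 0; i < attempts; i++) {
        T value = probe.get();
        if (value != null) {
          return value;
        }
        Thread.sleep(sleepMillis);
      }
      return null;
    }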
{ + expectedIds = new Integer[myExpectedIds.length][]; + for (int i = 0; i < myExpectedIds.length; ++i) { + expectedIds[i] = myExpectedIds[i].clone(); + } + } + + + public int getNumGroups() { + return expectedIds.length; + } + } + + private class TestParamsAndOutcome { + + public TestParams testParams; + public TestOutcome testOutcome; + + public TestParamsAndOutcome( + String mySort, String myGroupSort, int myGroupLimit, int myGroupOffset, + int myStart, Integer[][] myExpectedIds) { + testParams = new TestParams(mySort, myGroupSort, myGroupLimit, myGroupOffset, myStart); + testOutcome = new TestOutcome(myExpectedIds); + } + } + + @Test @ShardsFixed(num = 3) - public void test() throws Exception { del("*:*"); + public void test() throws Exception { + del("*:*"); - index(id, "1", "text", "a", "val", "25", "payload", ByteBuffer.wrap(new byte[] { 0x12, 0x62, 0x15 }), // 2 + index(id, "1", "text", "a", "val", "25", "payload", ByteBuffer.wrap(new byte[] { 0x12, 0x62, 0x15 }), // 2 // quick check to prove "*" dynamicField hasn't been broken by somebody mucking with schema "asdfasdf_field_should_match_catchall_dynamic_field_adsfasdf", "value"); index(id, "2", "text", "b", "val", "10", "payload", ByteBuffer.wrap(new byte[] { 0x25, 0x21, 0x16 })); // 5 @@ -124,381 +174,61 @@ public static void setUpBeforeClass() throws Exception { assertFieldValues(rsp.getResults(), id, 11, 13, 8, 9, 5, 3, 12, 10,16,17,18, 2, 4,14,15, 6, 1, 7); -// function sorting - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "payload desc, id asc", "group", "true", "group.field", "val", "group.sort", "id asc"); - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 6); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 1); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 2); - } - - - - - // function sorting - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "id asc"); - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 1); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 6); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 2); - } - -// Dup - // function sorting + // SOLR-6203 { + final TestParamsAndOutcome[] testList = new TestParamsAndOutcome[] { + + // non-function sorting + 
new TestParamsAndOutcome("payload desc, id desc", "id asc", 1, 0, 0, + new Integer[][]{{6}, {1}, {2}}), + + // function sorting + new TestParamsAndOutcome("abs(sub(5,id)) asc, id desc", "id asc", 1, 0, 0, + new Integer[][]{{1}, {6}, {2}}), + new TestParamsAndOutcome("sub(5,id) asc", "id asc", 1, 0, 0, + new Integer[][]{{2}, {1}, {6}}), + new TestParamsAndOutcome("abs(sub(5,id)) asc, id desc", "id desc", 1, 0, 0, + new Integer[][]{{9}, {8}, {10}}), + new TestParamsAndOutcome("abs(sub(5,id)) desc, id desc", "sum(3,id) asc", 2, 0, 0, + new Integer[][]{{2, 4}, {1, 3}, {6, 7}}), + new TestParamsAndOutcome("abs(sub(5,id)) asc, id desc", "sum(3,id) asc", 3, 0, 0, + new Integer[][]{{1, 3, 5}, {6, 7, 8}, {2, 4, 10} }), + new TestParamsAndOutcome("abs(sub(5,id)) asc, id desc", "sum(3,id) asc", 3, 0, 2, + new Integer[][]{{2, 4, 10}}), + new TestParamsAndOutcome("abs(sub(5,id)) asc, id desc", "sum(3,id) asc", 3, 1, 0, + new Integer[][]{{3, 5, 9}, {7, 8}, {4, 10} }), + new TestParamsAndOutcome("abs(sub(5,id)) asc, id desc", "sum(3,id) asc", 3, 2, 0, + new Integer[][]{{5, 9}, {8}, {10} }), + new TestParamsAndOutcome("sum(val,id) asc, id desc", "id asc", 3, 0, 0, + new Integer[][]{{2, 4, 10}, {6, 7, 8}, {1, 3, 5} }) + }; + + for (TestParamsAndOutcome test : testList ) { rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "id asc"); - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 1); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 6); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); + "group", "true", "group.field", "val", + "sort", test.testParams.sort, "group.sort", test.testParams.groupSort, + "group.limit", test.testParams.groupLimit, "group.offset", test.testParams.groupOffset, + "start", test.testParams.start); - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 2); - } - - - // function sorting (group.sort desc) - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "id desc"); + TestOutcome currOutcome = test.testOutcome; + int numExpectedGroups = currOutcome.getNumGroups(); + int numExpectedMatches = 10; NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - assertNotNull(grouped); - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); + assertEquals(numExpectedMatches, valFieldList.get("matches")); List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 9); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 8); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - 
assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 10); + assertEquals(numExpectedGroups, groupLists.size()); + for (int j=0; j groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 9); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 8); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 10); - } - - // function sorting - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) desc, id desc", "group", "true", "group.field", "val", "group.sort", "sum(3,id) asc", "group.limit", "2"); - - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 2, 4); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 1, 3); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 6, 7); - } - - - // function sorting - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "sum(3,id) asc", "group.limit", "3"); - - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 1, 3, 5); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 6, 7, 8); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 2, 4, 10); - } - - - // function sorting - // Pagination is on level of groups, not individual offers. 
- { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", "start", "2", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "sum(3,id) asc", "group.limit", "3"); - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(1, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 2, 4, 10); - } - - - // function sorting - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "sum(3,id) asc", "group.limit", "3", "group.offset", "1"); - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 3, 5,9 ); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 7, 8); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 4, 10); - } - - // function sorting - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "sum(3,id) asc", "group.limit", "3", "group.offset", "2"); - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 5, 9 ); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 8); - - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 10); - } - - // function sorting - { - rsp = query("q", "id:[1 TO 10]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "sum(3,id) desc", "group.limit", "3"); - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(10, valFieldList.get("matches")); - List groupLists = (List)valFieldList.get("groups"); - assertEquals(3, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 9, 5, 3); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - 
assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 8, 7, 6); - - NamedList groupList2 = (NamedList)groupLists.get(2); - assertNotNull(groupList2); - - assertFieldValues((SolrDocumentList)groupList2.get("doclist"), id, 10, 4, 2); - } - - // function sorting (id:[1 TO 2]) - { - rsp = query("q", "id:[1 TO 2]", "fl", "id", "rows", "20", - "sort", "abs(sub(5,id)) asc, id desc", "group", "true", "group.field", "val", "group.sort", "id asc"); - - NamedList grouped = (NamedList)rsp.getResponse().get("grouped"); - - assertNotNull(grouped); - - NamedList valFieldList = (NamedList)grouped.get("val"); - assertEquals(2, valFieldList.get("matches")); - - List groupLists = (List)valFieldList.get("groups"); - assertEquals(2, groupLists.size()); - - NamedList groupList0 = (NamedList)groupLists.get(0); - assertNotNull(groupList0); - - assertFieldValues((SolrDocumentList)groupList0.get("doclist"), id, 2); - - NamedList groupList1 = (NamedList)groupLists.get(1); - assertNotNull(groupList1); - - assertFieldValues((SolrDocumentList)groupList1.get("doclist"), id, 1); - } - - } } diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java index 9c90efb03e74..b3f1f306b6e7 100644 --- a/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java +++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java @@ -34,13 +34,14 @@ public void test() throws Exception { index(id, 19, "b_t", "snake spider shark snail slug", "foo_i", "2"); index(id, 20, "b_t", "snake spider shark snail", "foo_i", "3"); index(id, 21, "b_t", "snake spider shark", "foo_i", "2"); - index(id, 22, "b_t", "snake spider"); - index(id, 23, "b_t", "snake"); - index(id, 24, "b_t", "ant zebra"); - index(id, 25, "b_t", "zebra"); + index(id, 22, "b_t", "snake spider", "c_t", "snake spider"); + index(id, 23, "b_t", "snake", "c_t", "snake"); + index(id, 24, "b_t", "ant zebra", "c_t", "ant zebra"); + index(id, 25, "b_t", "zebra", "c_t", "zebra"); commit(); handle.clear(); + handle.put("terms", UNORDERED); query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "b_t"); query("qt", "/terms", "shards.qt", "/terms", "terms.limit", 5, "terms", "true", "terms.fl", "b_t", "terms.lower", "s"); @@ -53,5 +54,6 @@ public void test() throws Exception { query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "foo_i", "terms.list", "2, 3, 1"); query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "foo_i", "terms.stats", "true","terms.list", "2, 3, 1"); query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "b_t", "terms.list", "snake, zebra", "terms.ttf", "true"); + query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "b_t", "terms.fl", "c_t", "terms.list", "snake, ant, zebra", "terms.ttf", "true"); } } diff --git a/solr/core/src/test/org/apache/solr/handler/component/ResourceSharingTestComponent.java b/solr/core/src/test/org/apache/solr/handler/component/ResourceSharingTestComponent.java index 7c4e6639a1b0..d268a4e424da 100644 --- a/solr/core/src/test/org/apache/solr/handler/component/ResourceSharingTestComponent.java +++ b/solr/core/src/test/org/apache/solr/handler/component/ResourceSharingTestComponent.java @@ -63,11 +63,6 @@ public String getDescription() { return "ResourceSharingTestComponent"; } - @Override - public String getSource() { - return null; - } - 
@SuppressWarnings("unchecked") TestObject getTestObj() { return this.blob.get(); diff --git a/solr/core/src/test/org/apache/solr/handler/component/SpatialHeatmapFacetsTest.java b/solr/core/src/test/org/apache/solr/handler/component/SpatialHeatmapFacetsTest.java index 78dd919514ff..8d66b07e72e1 100644 --- a/solr/core/src/test/org/apache/solr/handler/component/SpatialHeatmapFacetsTest.java +++ b/solr/core/src/test/org/apache/solr/handler/component/SpatialHeatmapFacetsTest.java @@ -119,6 +119,28 @@ public void test() throws Exception { counts ); + // now this time we add a filter query and exclude it + QueryResponse response = query(params(baseParams, + "fq", "{!tag=excludeme}id:0", // filter to only be id:0 + FacetParams.FACET_HEATMAP, "{!ex=excludeme}" + FIELD, // exclude the filter + FacetParams.FACET_HEATMAP_GEOM, "[\"50 20\" TO \"180 90\"]", + FacetParams.FACET_HEATMAP_LEVEL, "4")); + assertEquals(1, response.getResults().getNumFound());// because of our 'fq' + hmObj = getHmObj(response); + counts = (List>) hmObj.get("counts_ints2D"); + assertEquals( + Arrays.asList( // same counts as before + Arrays.asList(0, 0, 2, 1, 0, 0), + Arrays.asList(0, 0, 1, 1, 0, 0), + Arrays.asList(0, 1, 1, 1, 0, 0), + Arrays.asList(0, 0, 1, 1, 0, 0), + Arrays.asList(0, 0, 1, 1, 0, 0), + null, + null + ), + counts + ); + // test using a circle input shape hmObj = getHmObj(query(params(baseParams, FacetParams.FACET_HEATMAP_GEOM, "BUFFER(POINT(110 40), 7)", diff --git a/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java index 7fb5e1202603..925cb248e387 100644 --- a/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java +++ b/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java @@ -326,14 +326,14 @@ public void testDocFreqAndTotalTermFreq() throws Exception { "terms.list", "snake,spider,shark,ddddd"); assertQ(req, "count(//lst[@name='standardfilt']/*)=4", - "//lst[@name='standardfilt']/lst[@name='ddddd']/long[@name='docFreq'][.='4']", - "//lst[@name='standardfilt']/lst[@name='ddddd']/long[@name='totalTermFreq'][.='4']", - "//lst[@name='standardfilt']/lst[@name='shark']/long[@name='docFreq'][.='2']", - "//lst[@name='standardfilt']/lst[@name='shark']/long[@name='totalTermFreq'][.='2']", - "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='docFreq'][.='3']", - "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='totalTermFreq'][.='3']", - "//lst[@name='standardfilt']/lst[@name='spider']/long[@name='docFreq'][.='1']", - "//lst[@name='standardfilt']/lst[@name='spider']/long[@name='totalTermFreq'][.='1']"); + "//lst[@name='standardfilt']/lst[@name='ddddd']/long[@name='df'][.='4']", + "//lst[@name='standardfilt']/lst[@name='ddddd']/long[@name='ttf'][.='4']", + "//lst[@name='standardfilt']/lst[@name='shark']/long[@name='df'][.='2']", + "//lst[@name='standardfilt']/lst[@name='shark']/long[@name='ttf'][.='2']", + "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='df'][.='3']", + "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='ttf'][.='3']", + "//lst[@name='standardfilt']/lst[@name='spider']/long[@name='df'][.='1']", + "//lst[@name='standardfilt']/lst[@name='spider']/long[@name='ttf'][.='1']"); } @Test @@ -347,8 +347,35 @@ public void testDocFreqAndTotalTermFreqForNonExistingTerm() throws Exception { "terms.list", "boo,snake"); assertQ(req, "count(//lst[@name='standardfilt']/*)=1", - 
"//lst[@name='standardfilt']/lst[@name='snake']/long[@name='docFreq'][.='3']", - "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='totalTermFreq'][.='3']"); + "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='df'][.='3']", + "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='ttf'][.='3']"); + } + + @Test + public void testDocFreqAndTotalTermFreqForMultipleFields() throws Exception { + SolrQueryRequest req = req( + "indent","true", + "qt", "/terms", + "terms", "true", + "terms.fl", "lowerfilt", + "terms.fl", "standardfilt", + "terms.ttf", "true", + "terms.list", "a,aa,aaa"); + assertQ(req, + "count(//lst[@name='lowerfilt']/*)=3", + "count(//lst[@name='standardfilt']/*)=3", + "//lst[@name='lowerfilt']/lst[@name='a']/long[@name='df'][.='2']", + "//lst[@name='lowerfilt']/lst[@name='a']/long[@name='ttf'][.='2']", + "//lst[@name='lowerfilt']/lst[@name='aa']/long[@name='df'][.='1']", + "//lst[@name='lowerfilt']/lst[@name='aa']/long[@name='ttf'][.='1']", + "//lst[@name='lowerfilt']/lst[@name='aaa']/long[@name='df'][.='1']", + "//lst[@name='lowerfilt']/lst[@name='aaa']/long[@name='ttf'][.='1']", + "//lst[@name='standardfilt']/lst[@name='a']/long[@name='df'][.='1']", + "//lst[@name='standardfilt']/lst[@name='a']/long[@name='ttf'][.='1']", + "//lst[@name='standardfilt']/lst[@name='aa']/long[@name='df'][.='1']", + "//lst[@name='standardfilt']/lst[@name='aa']/long[@name='ttf'][.='1']", + "//lst[@name='standardfilt']/lst[@name='aaa']/long[@name='df'][.='1']", + "//lst[@name='standardfilt']/lst[@name='aaa']/long[@name='ttf'][.='1']"); } } diff --git a/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java b/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java index 6506f98b9148..f0b58cd53f39 100644 --- a/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java +++ b/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java @@ -20,7 +20,9 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Set; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; @@ -35,6 +37,7 @@ import org.apache.solr.handler.component.HighlightComponent; import org.apache.solr.handler.component.ResponseBuilder; import org.apache.solr.handler.component.SearchComponent; +import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.search.DocSet; @@ -868,6 +871,8 @@ public void testGetHighlightFields() { "text", "test", // static not stored "foo_s", "test", // dynamic stored "foo_sI", "test", // dynamic not stored + "bar_s", "test", // dynamic stored + "bar_sI", "test", // dynamic not stored "weight", "1.0")); // stored but not text assertU(commit()); assertU(optimize()); @@ -898,6 +903,21 @@ public void testGetHighlightFields() { assertEquals("Expected to highlight on field \"foo_s\"", "foo_s", highlightFieldNames.get(0)); request.close(); + + // SOLR-5127 + args.put("hl.fl", (random().nextBoolean() ? 
"foo_*,bar_*" : "bar_*,foo_*")); + lrf = h.getRequestFactory("standard", 0, 10, args); + // hl.fl ordering need not be preserved in output + final Set highlightedSetExpected = new HashSet(); + highlightedSetExpected.add("foo_s"); + highlightedSetExpected.add("bar_s"); + try (LocalSolrQueryRequest localRequest = lrf.makeRequest("test")) { + highlighter = HighlightComponent.getHighlighter(h.getCore()); + final Set highlightedSetActual = new HashSet( + Arrays.asList(highlighter.getHighlightFields(null, + localRequest, new String[] {}))); + assertEquals(highlightedSetExpected, highlightedSetActual); + } } @Test diff --git a/solr/core/src/test/org/apache/solr/highlight/TestPostingsSolrHighlighter.java b/solr/core/src/test/org/apache/solr/highlight/TestPostingsSolrHighlighter.java index 3f25464a4462..3862fa6d106a 100644 --- a/solr/core/src/test/org/apache/solr/highlight/TestPostingsSolrHighlighter.java +++ b/solr/core/src/test/org/apache/solr/highlight/TestPostingsSolrHighlighter.java @@ -99,6 +99,19 @@ public void testTwoFields() { "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text3']/str='crappier document'"); } + // SOLR-5127 + public void testMultipleFieldsViaWildcard() { + assertQ("highlighting text and text3*", + req("q", (random().nextBoolean() ? "text:document text3:document" : "text3:document text:document"), + "sort", "id asc", "hl", "true", + "hl.fl", (random().nextBoolean() ? "text,text3*" : "text3*,text")), + "count(//lst[@name='highlighting']/*)=2", + "//lst[@name='highlighting']/lst[@name='101']/arr[@name='text']/str='document one'", + "//lst[@name='highlighting']/lst[@name='101']/arr[@name='text3']/str='crappy document'", + "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text']/str='second document'", + "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text3']/str='crappier document'"); + } + public void testMisconfiguredField() { ignoreException("was indexed without offsets"); try { diff --git a/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java b/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java index 2f7a003c88a6..ad1ca0336efa 100644 --- a/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java +++ b/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java @@ -179,6 +179,19 @@ public void testTwoFields() { "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text3']/str='crappier document'"); } + // SOLR-5127 + public void testMultipleFieldsViaWildcard() { + assertQ("highlighting text and text3*", + req("q", (random().nextBoolean() ? "text:document text3:document" : "text3:document text:document"), + "sort", "id asc", "hl", "true", + "hl.fl", (random().nextBoolean() ? 
"text,text3*" : "text3*,text")), + "count(//lst[@name='highlighting']/*)=2", + "//lst[@name='highlighting']/lst[@name='101']/arr[@name='text']/str='document one'", + "//lst[@name='highlighting']/lst[@name='101']/arr[@name='text3']/str='crappy document'", + "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text']/str='second document'", + "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text3']/str='crappier document'"); + } + public void testTags() { assertQ("different pre/post tags", req("q", "text:document", "sort", "id asc", "hl", "true", "hl.tag.pre", "[", "hl.tag.post", "]"), diff --git a/solr/core/src/test/org/apache/solr/metrics/JvmMetricsTest.java b/solr/core/src/test/org/apache/solr/metrics/JvmMetricsTest.java index 72adc6863540..020fe5ecefaf 100644 --- a/solr/core/src/test/org/apache/solr/metrics/JvmMetricsTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/JvmMetricsTest.java @@ -16,13 +16,18 @@ */ package org.apache.solr.metrics; -import javax.management.MBeanServer; -import java.lang.management.ManagementFactory; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; import java.util.Map; import com.codahale.metrics.Gauge; import com.codahale.metrics.Metric; +import org.apache.commons.io.FileUtils; import org.apache.solr.SolrJettyTestBase; +import org.apache.solr.core.NodeConfig; +import org.apache.solr.core.SolrResourceLoader; +import org.apache.solr.core.SolrXmlConfig; import org.junit.BeforeClass; import org.junit.Test; @@ -31,26 +36,103 @@ */ public class JvmMetricsTest extends SolrJettyTestBase { + static final String[] STRING_OS_METRICS = { + "arch", + "name", + "version" + }; + static final String[] NUMERIC_OS_METRICS = { + "availableProcessors", + "systemLoadAverage" + }; + + static final String[] BUFFER_METRICS = { + "direct.Count", + "direct.MemoryUsed", + "direct.TotalCapacity", + "mapped.Count", + "mapped.MemoryUsed", + "mapped.TotalCapacity" + }; + @BeforeClass public static void beforeTest() throws Exception { createJetty(legacyExampleCollection1SolrHome()); } @Test - public void testOperatingSystemMetricsSet() throws Exception { - MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer(); - OperatingSystemMetricSet set = new OperatingSystemMetricSet(mBeanServer); + public void testOperatingSystemMetricSet() throws Exception { + OperatingSystemMetricSet set = new OperatingSystemMetricSet(); Map metrics = set.getMetrics(); assertTrue(metrics.size() > 0); - for (String metric : OperatingSystemMetricSet.METRICS) { + for (String metric : NUMERIC_OS_METRICS) { Gauge gauge = (Gauge)metrics.get(metric); - if (gauge == null || gauge.getValue() == null) { // some are optional depending on OS - continue; - } + assertNotNull(metric, gauge); double value = ((Number)gauge.getValue()).doubleValue(); // SystemLoadAverage on Windows may be -1.0 assertTrue("unexpected value of " + metric + ": " + value, value >= 0 || value == -1.0); } + for (String metric : STRING_OS_METRICS) { + Gauge gauge = (Gauge)metrics.get(metric); + assertNotNull(metric, gauge); + String value = (String)gauge.getValue(); + assertNotNull(value); + assertFalse(value.isEmpty()); + } + } + + @Test + public void testAltBufferPoolMetricSet() throws Exception { + AltBufferPoolMetricSet set = new AltBufferPoolMetricSet(); + Map metrics = set.getMetrics(); + assertTrue(metrics.size() > 0); + for (String name : BUFFER_METRICS) { + assertNotNull(name, metrics.get(name)); + Object g = metrics.get(name); + assertTrue(g instanceof Gauge); + Object v = 
((Gauge)g).getValue(); + assertTrue(v instanceof Long); + } + } + + @Test + public void testSystemProperties() throws Exception { + if (System.getProperty("basicauth") == null) { + // make sure it's set + System.setProperty("basicauth", "foo:bar"); + } + SolrMetricManager metricManager = jetty.getCoreContainer().getMetricManager(); + Map metrics = metricManager.registry("solr.jvm").getMetrics(); + MetricsMap map = (MetricsMap)metrics.get("system.properties"); + assertNotNull(map); + Map values = map.getValue(); + System.getProperties().forEach((k, v) -> { + if (NodeConfig.NodeConfigBuilder.DEFAULT_HIDDEN_SYS_PROPS.contains(k)) { + assertNull("hidden property " + k + " present!", values.get(k)); + } else { + assertEquals(v, values.get(String.valueOf(k))); + } + }); + } + + @Test + public void testHiddenSysProps() throws Exception { + Path home = Paths.get(TEST_HOME()); + SolrResourceLoader loader = new SolrResourceLoader(home); + + // default config + String solrXml = FileUtils.readFileToString(Paths.get(home.toString(), "solr.xml").toFile(), "UTF-8"); + NodeConfig config = SolrXmlConfig.fromString(loader, solrXml); + NodeConfig.NodeConfigBuilder.DEFAULT_HIDDEN_SYS_PROPS.forEach(s -> { + assertTrue(s, config.getHiddenSysProps().contains(s)); + }); + + // custom config + solrXml = FileUtils.readFileToString(Paths.get(home.toString(), "solr-hiddensysprops.xml").toFile(), "UTF-8"); + NodeConfig config2 = SolrXmlConfig.fromString(loader, solrXml); + Arrays.asList("foo", "bar", "baz").forEach(s -> { + assertTrue(s, config2.getHiddenSysProps().contains(s)); + }); } @Test @@ -64,5 +146,6 @@ public void testSetupJvmMetrics() throws Exception { assertTrue(metrics.toString(), metrics.entrySet().stream().filter(e -> e.getKey().startsWith("gc.")).count() > 0); assertTrue(metrics.toString(), metrics.entrySet().stream().filter(e -> e.getKey().startsWith("memory.")).count() > 0); assertTrue(metrics.toString(), metrics.entrySet().stream().filter(e -> e.getKey().startsWith("threads.")).count() > 0); + assertTrue(metrics.toString(), metrics.entrySet().stream().filter(e -> e.getKey().startsWith("system.")).count() > 0); } } diff --git a/solr/core/src/test/org/apache/solr/metrics/SolrCoreMetricManagerTest.java b/solr/core/src/test/org/apache/solr/metrics/SolrCoreMetricManagerTest.java index 6e8e1e58e92b..3001e0cfba99 100644 --- a/solr/core/src/test/org/apache/solr/metrics/SolrCoreMetricManagerTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/SolrCoreMetricManagerTest.java @@ -29,7 +29,7 @@ import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.core.PluginInfo; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.reporters.MockMetricReporter; import org.apache.solr.schema.FieldType; import org.junit.After; @@ -46,7 +46,7 @@ public class SolrCoreMetricManagerTest extends SolrTestCaseJ4 { public void beforeTest() throws Exception { initCore("solrconfig-basic.xml", "schema.xml"); coreMetricManager = h.getCore().getCoreMetricManager(); - metricManager = h.getCore().getCoreDescriptor().getCoreContainer().getMetricManager(); + metricManager = h.getCore().getCoreContainer().getMetricManager(); } @After @@ -61,7 +61,7 @@ public void testRegisterMetrics() { Random random = random(); String scope = SolrMetricTestUtils.getRandomScope(random); - SolrInfoMBean.Category category = SolrMetricTestUtils.getRandomCategory(random); + SolrInfoBean.Category category = 
SolrMetricTestUtils.getRandomCategory(random); Map metrics = SolrMetricTestUtils.getRandomMetrics(random); SolrMetricProducer producer = SolrMetricTestUtils.getProducerOf(metricManager, category, scope, metrics); try { @@ -82,7 +82,7 @@ public void testRegisterMetricsWithReplacements() { Map registered = new HashMap<>(); String scope = SolrMetricTestUtils.getRandomScope(random, true); - SolrInfoMBean.Category category = SolrMetricTestUtils.getRandomCategory(random, true); + SolrInfoBean.Category category = SolrMetricTestUtils.getRandomCategory(random, true); int iterations = TestUtil.nextInt(random, 0, MAX_ITERATIONS); for (int i = 0; i < iterations; ++i) { diff --git a/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java b/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java index 1c29c5e9a732..d30611904c3a 100644 --- a/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java @@ -29,7 +29,7 @@ import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.util.NamedList; import org.apache.solr.core.PluginInfo; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.metrics.reporters.MockMetricReporter; import org.junit.Test; @@ -62,10 +62,10 @@ public void testSwapRegistries() throws Exception { String toName = "to-" + TestUtil.randomSimpleString(r, 1, 10); // register test metrics for (Map.Entry entry : metrics1.entrySet()) { - metricManager.register(fromName, entry.getValue(), false, entry.getKey(), "metrics1"); + metricManager.register(null, fromName, entry.getValue(), false, entry.getKey(), "metrics1"); } for (Map.Entry entry : metrics2.entrySet()) { - metricManager.register(toName, entry.getValue(), false, entry.getKey(), "metrics2"); + metricManager.register(null, toName, entry.getValue(), false, entry.getKey(), "metrics2"); } assertEquals(metrics1.size(), metricManager.registry(fromName).getMetrics().size()); assertEquals(metrics2.size(), metricManager.registry(toName).getMetrics().size()); @@ -125,13 +125,13 @@ public void testClearMetrics() throws Exception { String registryName = TestUtil.randomSimpleString(r, 1, 10); for (Map.Entry entry : metrics.entrySet()) { - metricManager.register(registryName, entry.getValue(), false, entry.getKey(), "foo", "bar"); + metricManager.register(null, registryName, entry.getValue(), false, entry.getKey(), "foo", "bar"); } for (Map.Entry entry : metrics.entrySet()) { - metricManager.register(registryName, entry.getValue(), false, entry.getKey(), "foo", "baz"); + metricManager.register(null, registryName, entry.getValue(), false, entry.getKey(), "foo", "baz"); } for (Map.Entry entry : metrics.entrySet()) { - metricManager.register(registryName, entry.getValue(), false, entry.getKey(), "foo"); + metricManager.register(null, registryName, entry.getValue(), false, entry.getKey(), "foo"); } assertEquals(metrics.size() * 3, metricManager.registry(registryName).getMetrics().size()); @@ -163,10 +163,10 @@ public void testSimpleMetrics() throws Exception { String registryName = TestUtil.randomSimpleString(r, 1, 10); - metricManager.counter(registryName, "simple_counter", "foo", "bar"); - metricManager.timer(registryName, "simple_timer", "foo", "bar"); - metricManager.meter(registryName, "simple_meter", "foo", "bar"); - metricManager.histogram(registryName, "simple_histogram", "foo", "bar"); + metricManager.counter(null, 
registryName, "simple_counter", "foo", "bar"); + metricManager.timer(null, registryName, "simple_timer", "foo", "bar"); + metricManager.meter(null, registryName, "simple_meter", "foo", "bar"); + metricManager.histogram(null, registryName, "simple_histogram", "foo", "bar"); Map metrics = metricManager.registry(registryName).getMetrics(); assertEquals(4, metrics.size()); for (Map.Entry entry : metrics.entrySet()) { @@ -180,13 +180,13 @@ public void testRegistryName() throws Exception { String name = TestUtil.randomSimpleString(r, 1, 10); - String result = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, name, "collection1"); + String result = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, name, "collection1"); assertEquals("solr.core." + name + ".collection1", result); // try it with already prefixed name - group will be ignored - result = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, result); + result = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, result); assertEquals("solr.core." + name + ".collection1", result); // try it with already prefixed name but with additional segments - result = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, result, "shard1", "replica1"); + result = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, result, "shard1", "replica1"); assertEquals("solr.core." + name + ".collection1.shard1.replica1", result); } @@ -206,18 +206,18 @@ public void testReporters() throws Exception { createPluginInfo("core_foo", "core", null) }; String tag = "xyz"; - metricManager.loadReporters(plugins, loader, tag, SolrInfoMBean.Group.node); + metricManager.loadReporters(plugins, loader, tag, SolrInfoBean.Group.node); Map reporters = metricManager.getReporters( - SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node)); + SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)); assertEquals(4, reporters.size()); assertTrue(reporters.containsKey("universal_foo@" + tag)); assertTrue(reporters.containsKey("multigroup_foo@" + tag)); assertTrue(reporters.containsKey("node_foo@" + tag)); assertTrue(reporters.containsKey("multiregistry_foo@" + tag)); - metricManager.loadReporters(plugins, loader, tag, SolrInfoMBean.Group.core, "collection1"); + metricManager.loadReporters(plugins, loader, tag, SolrInfoBean.Group.core, "collection1"); reporters = metricManager.getReporters( - SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, "collection1")); + SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, "collection1")); assertEquals(5, reporters.size()); assertTrue(reporters.containsKey("universal_foo@" + tag)); assertTrue(reporters.containsKey("multigroup_foo@" + tag)); @@ -225,26 +225,26 @@ public void testReporters() throws Exception { assertTrue(reporters.containsKey("core_foo@" + tag)); assertTrue(reporters.containsKey("multiregistry_foo@" + tag)); - metricManager.loadReporters(plugins, loader, tag, SolrInfoMBean.Group.jvm); + metricManager.loadReporters(plugins, loader, tag, SolrInfoBean.Group.jvm); reporters = metricManager.getReporters( - SolrMetricManager.getRegistryName(SolrInfoMBean.Group.jvm)); + SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm)); assertEquals(2, reporters.size()); assertTrue(reporters.containsKey("universal_foo@" + tag)); assertTrue(reporters.containsKey("multigroup_foo@" + tag)); metricManager.removeRegistry("solr.jvm"); reporters = metricManager.getReporters( - SolrMetricManager.getRegistryName(SolrInfoMBean.Group.jvm)); + 
SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm)); assertEquals(0, reporters.size()); metricManager.removeRegistry("solr.node"); reporters = metricManager.getReporters( - SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node)); + SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)); assertEquals(0, reporters.size()); metricManager.removeRegistry("solr.core.collection1"); reporters = metricManager.getReporters( - SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, "collection1")); + SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, "collection1")); assertEquals(0, reporters.size()); } diff --git a/solr/core/src/test/org/apache/solr/metrics/SolrMetricReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/SolrMetricReporterTest.java index b275919a873f..f3359cca5a0c 100644 --- a/solr/core/src/test/org/apache/solr/metrics/SolrMetricReporterTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/SolrMetricReporterTest.java @@ -42,6 +42,7 @@ public void testInit() throws Exception { Map attrs = new HashMap<>(); attrs.put(FieldType.CLASS_NAME, MockMetricReporter.class.getName()); attrs.put(CoreAdminParams.NAME, TestUtil.randomUnicodeString(random)); + attrs.put("enabled", random.nextBoolean()); boolean shouldDefineConfigurable = random.nextBoolean(); String configurable = TestUtil.randomUnicodeString(random); diff --git a/solr/core/src/test/org/apache/solr/metrics/SolrMetricTestUtils.java b/solr/core/src/test/org/apache/solr/metrics/SolrMetricTestUtils.java index 6bd6500b4fe1..98fc9b1c810d 100644 --- a/solr/core/src/test/org/apache/solr/metrics/SolrMetricTestUtils.java +++ b/solr/core/src/test/org/apache/solr/metrics/SolrMetricTestUtils.java @@ -23,12 +23,12 @@ import com.codahale.metrics.Counter; import org.apache.lucene.util.TestUtil; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; public final class SolrMetricTestUtils { private static final int MAX_ITERATIONS = 100; - private static final SolrInfoMBean.Category CATEGORIES[] = SolrInfoMBean.Category.values(); + private static final SolrInfoBean.Category CATEGORIES[] = SolrInfoBean.Category.values(); public static String getRandomScope(Random random) { return getRandomScope(random, random.nextBoolean()); @@ -38,11 +38,11 @@ public static String getRandomScope(Random random, boolean shouldDefineScope) { return shouldDefineScope ? TestUtil.randomSimpleString(random, 1, 10) : null; // must be simple string for JMX publishing } - public static SolrInfoMBean.Category getRandomCategory(Random random) { + public static SolrInfoBean.Category getRandomCategory(Random random) { return getRandomCategory(random, random.nextBoolean()); } - public static SolrInfoMBean.Category getRandomCategory(Random random, boolean shouldDefineCategory) { + public static SolrInfoBean.Category getRandomCategory(Random random, boolean shouldDefineCategory) { return shouldDefineCategory ? 
CATEGORIES[TestUtil.nextInt(random, 0, CATEGORIES.length - 1)] : null; } @@ -75,7 +75,7 @@ public static Map getRandomMetricsWithReplacements(Random rando return metrics; } - public static SolrMetricProducer getProducerOf(SolrMetricManager metricManager, SolrInfoMBean.Category category, String scope, Map metrics) { + public static SolrMetricProducer getProducerOf(SolrMetricManager metricManager, SolrInfoBean.Category category, String scope, Map metrics) { return new SolrMetricProducer() { @Override public void initializeMetrics(SolrMetricManager manager, String registry, String scope) { @@ -86,7 +86,7 @@ public void initializeMetrics(SolrMetricManager manager, String registry, String return; } for (Map.Entry entry : metrics.entrySet()) { - manager.counter(registry, entry.getKey(), category.toString(), scope); + manager.counter(null, registry, entry.getKey(), category.toString(), scope); } } diff --git a/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java b/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java index dfb5a0fa2da5..56dab37e65eb 100644 --- a/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java @@ -29,10 +29,11 @@ import org.apache.solr.core.CoreContainer; import org.apache.solr.core.NodeConfig; import org.apache.solr.core.PluginInfo; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.core.SolrXmlConfig; import org.apache.solr.metrics.reporters.MockMetricReporter; +import org.apache.solr.util.JmxUtil; import org.apache.solr.util.TestHarness; import org.junit.After; import org.junit.Before; @@ -50,11 +51,12 @@ public class SolrMetricsIntegrationTest extends SolrTestCaseJ4 { private static final String MULTIREGISTRY = "multiregistry"; private static final String[] INITIAL_REPORTERS = {REPORTER_NAMES[0], REPORTER_NAMES[1], UNIVERSAL, SPECIFIC, MULTIGROUP, MULTIREGISTRY}; private static final String[] RENAMED_REPORTERS = {REPORTER_NAMES[0], REPORTER_NAMES[1], UNIVERSAL, MULTIGROUP}; - private static final SolrInfoMBean.Category HANDLER_CATEGORY = SolrInfoMBean.Category.QUERY; + private static final SolrInfoBean.Category HANDLER_CATEGORY = SolrInfoBean.Category.QUERY; private CoreContainer cc; private SolrMetricManager metricManager; private String tag; + private int jmxReporter; private void assertTagged(Map reporters, String name) { assertTrue("Reporter '" + name + "' missing in " + reporters, reporters.containsKey(name + "@" + tag)); @@ -71,11 +73,12 @@ public void beforeTest() throws Exception { cc = createCoreContainer(cfg, new TestHarness.TestCoresLocator(DEFAULT_TEST_CORENAME, initCoreDataDir.getAbsolutePath(), "solrconfig.xml", "schema.xml")); h.coreName = DEFAULT_TEST_CORENAME; + jmxReporter = JmxUtil.findFirstMBeanServer() != null ? 1 : 0; metricManager = cc.getMetricManager(); tag = h.getCore().getCoreMetricManager().getTag(); // initially there are more reporters, because two of them are added via a matching collection name Map reporters = metricManager.getReporters("solr.core." 
+ DEFAULT_TEST_CORENAME); - assertEquals(INITIAL_REPORTERS.length, reporters.size()); + assertEquals(INITIAL_REPORTERS.length + jmxReporter, reporters.size()); for (String r : INITIAL_REPORTERS) { assertTagged(reporters, r); } @@ -85,9 +88,9 @@ public void beforeTest() throws Exception { cfg = cc.getConfig(); PluginInfo[] plugins = cfg.getMetricReporterPlugins(); assertNotNull(plugins); - assertEquals(10, plugins.length); + assertEquals(10 + jmxReporter, plugins.length); reporters = metricManager.getReporters("solr.node"); - assertEquals(4, reporters.size()); + assertEquals(4 + jmxReporter, reporters.size()); assertTrue("Reporter '" + REPORTER_NAMES[0] + "' missing in solr.node", reporters.containsKey(REPORTER_NAMES[0])); assertTrue("Reporter '" + UNIVERSAL + "' missing in solr.node", reporters.containsKey(UNIVERSAL)); assertTrue("Reporter '" + MULTIGROUP + "' missing in solr.node", reporters.containsKey(MULTIGROUP)); @@ -120,7 +123,7 @@ public void testConfigureReporter() throws Exception { String metricName = SolrMetricManager.mkName(METRIC_NAME, HANDLER_CATEGORY.toString(), HANDLER_NAME); SolrCoreMetricManager coreMetricManager = h.getCore().getCoreMetricManager(); - Timer timer = (Timer) metricManager.timer(coreMetricManager.getRegistryName(), metricName); + Timer timer = (Timer) metricManager.timer(null, coreMetricManager.getRegistryName(), metricName); long initialCount = timer.getCount(); @@ -132,7 +135,7 @@ public void testConfigureReporter() throws Exception { long finalCount = timer.getCount(); assertEquals("metric counter incorrect", iterations, finalCount - initialCount); Map reporters = metricManager.getReporters(coreMetricManager.getRegistryName()); - assertEquals(RENAMED_REPORTERS.length, reporters.size()); + assertEquals(RENAMED_REPORTERS.length + jmxReporter, reporters.size()); // SPECIFIC and MULTIREGISTRY were skipped because they were // specific to collection1 diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java index c50ff3c8419f..eca414cd8ff0 100644 --- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java @@ -64,7 +64,7 @@ public void testReporter() throws Exception { h.coreName = DEFAULT_TEST_CORENAME; SolrMetricManager metricManager = cc.getMetricManager(); Map reporters = metricManager.getReporters("solr.node"); - assertEquals(1, reporters.size()); + assertTrue(reporters.toString(), reporters.size() >= 1); SolrMetricReporter reporter = reporters.get("test"); assertNotNull(reporter); assertTrue(reporter instanceof SolrGangliaReporter); diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java index f45b19359db6..54385049033c 100644 --- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java @@ -35,6 +35,7 @@ import org.apache.solr.core.SolrXmlConfig; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricReporter; +import org.apache.solr.util.JmxUtil; import org.apache.solr.util.TestHarness; import org.junit.Test; @@ -45,6 +46,7 @@ public class SolrGraphiteReporterTest extends SolrTestCaseJ4 { @Test public void testReporter() throws Exception { + int 
jmxReporter = JmxUtil.findFirstMBeanServer() != null ? 1: 0; Path home = Paths.get(TEST_HOME()); // define these properties, they are used in solrconfig.xml System.setProperty("solr.test.sys.prop1", "propone"); @@ -63,7 +65,7 @@ public void testReporter() throws Exception { h.coreName = DEFAULT_TEST_CORENAME; SolrMetricManager metricManager = cc.getMetricManager(); Map reporters = metricManager.getReporters("solr.node"); - assertEquals(1, reporters.size()); + assertEquals(1 + jmxReporter, reporters.size()); SolrMetricReporter reporter = reporters.get("test"); assertNotNull(reporter); assertTrue(reporter instanceof SolrGraphiteReporter); diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterTest.java index 82b9d58cc850..f9a32c046e80 100644 --- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterTest.java @@ -20,6 +20,7 @@ import javax.management.ObjectInstance; import javax.management.ObjectName; +import java.rmi.registry.LocateRegistry; import java.util.HashMap; import java.util.Map; import java.util.Random; @@ -31,7 +32,7 @@ import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.core.PluginInfo; import org.apache.solr.core.SolrCore; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.SolrCoreMetricManager; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricProducer; @@ -40,12 +41,15 @@ import org.apache.solr.schema.FieldType; import org.junit.After; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; public class SolrJmxReporterTest extends SolrTestCaseJ4 { private static final int MAX_ITERATIONS = 20; + private static int jmxPort; + private String domain; private SolrCoreMetricManager coreMetricManager; @@ -53,6 +57,14 @@ public class SolrJmxReporterTest extends SolrTestCaseJ4 { private SolrJmxReporter reporter; private MBeanServer mBeanServer; private String reporterName; + private String rootName; + + @BeforeClass + public static void init() throws Exception { + jmxPort = getNextAvailablePort(); + assertFalse(jmxPort == -1); + LocateRegistry.createRegistry(jmxPort); + } @Before public void beforeTest() throws Exception { @@ -60,10 +72,11 @@ public void beforeTest() throws Exception { final SolrCore core = h.getCore(); domain = core.getName(); + rootName = TestUtil.randomSimpleString(random(), 5, 10); coreMetricManager = core.getCoreMetricManager(); - metricManager = core.getCoreDescriptor().getCoreContainer().getMetricManager(); - PluginInfo pluginInfo = createReporterPluginInfo(); + metricManager = core.getCoreContainer().getMetricManager(); + PluginInfo pluginInfo = createReporterPluginInfo(rootName, true); metricManager.loadReporter(coreMetricManager.getRegistryName(), coreMetricManager.getCore().getResourceLoader(), pluginInfo, coreMetricManager.getTag()); @@ -79,14 +92,17 @@ public void beforeTest() throws Exception { assertNotNull("MBean server not found.", mBeanServer); } - private PluginInfo createReporterPluginInfo() { + private PluginInfo createReporterPluginInfo(String rootName, boolean enabled) { Random random = random(); String className = SolrJmxReporter.class.getName(); - String reporterName = TestUtil.randomSimpleString(random, 1, 10); + String reporterName = TestUtil.randomSimpleString(random, 5, 10); Map attrs 
= new HashMap<>(); attrs.put(FieldType.CLASS_NAME, className); attrs.put(CoreAdminParams.NAME, reporterName); + attrs.put("rootName", rootName); + attrs.put("enabled", enabled); + attrs.put("serviceUrl", "service:jmx:rmi:///jndi/rmi://localhost:" + jmxPort + "/solrjmx"); boolean shouldOverrideDomain = random.nextBoolean(); if (shouldOverrideDomain) { @@ -114,7 +130,7 @@ public void testReportMetrics() throws Exception { Map registered = new HashMap<>(); String scope = SolrMetricTestUtils.getRandomScope(random, true); - SolrInfoMBean.Category category = SolrMetricTestUtils.getRandomCategory(random, true); + SolrInfoBean.Category category = SolrMetricTestUtils.getRandomCategory(random, true); int iterations = TestUtil.nextInt(random, 0, MAX_ITERATIONS); for (int i = 0; i < iterations; ++i) { @@ -126,7 +142,7 @@ public void testReportMetrics() throws Exception { Set objects = mBeanServer.queryMBeans(null, null); assertEquals(registered.size(), objects.stream(). filter(o -> scope.equals(o.getObjectName().getKeyProperty("scope")) && - reporterName.equals(o.getObjectName().getKeyProperty("reporter"))).count()); + rootName.equals(o.getObjectName().getDomain())).count()); } } @@ -135,17 +151,17 @@ public void testReloadCore() throws Exception { Random random = random(); String scope = SolrMetricTestUtils.getRandomScope(random, true); - SolrInfoMBean.Category category = SolrMetricTestUtils.getRandomCategory(random, true); + SolrInfoBean.Category category = SolrMetricTestUtils.getRandomCategory(random, true); Map metrics = SolrMetricTestUtils.getRandomMetrics(random, true); SolrMetricProducer producer = SolrMetricTestUtils.getProducerOf(metricManager, category, scope, metrics); coreMetricManager.registerMetricProducer(scope, producer); Set objects = mBeanServer.queryMBeans(null, null); assertEquals(metrics.size(), objects.stream(). filter(o -> scope.equals(o.getObjectName().getKeyProperty("scope")) && - reporterName.equals(o.getObjectName().getKeyProperty("reporter"))).count()); + o.getObjectName().getDomain().equals(rootName)).count()); h.getCoreContainer().reload(h.getCore().getName()); - PluginInfo pluginInfo = createReporterPluginInfo(); + PluginInfo pluginInfo = createReporterPluginInfo(rootName, true); metricManager.loadReporter(coreMetricManager.getRegistryName(), coreMetricManager.getCore().getResourceLoader(), pluginInfo, String.valueOf(coreMetricManager.getCore().hashCode())); coreMetricManager.registerMetricProducer(scope, producer); @@ -153,7 +169,38 @@ public void testReloadCore() throws Exception { objects = mBeanServer.queryMBeans(null, null); assertEquals(metrics.size(), objects.stream(). 
filter(o -> scope.equals(o.getObjectName().getKeyProperty("scope")) && - pluginInfo.name.equals(o.getObjectName().getKeyProperty("reporter"))).count()); + rootName.equals(o.getObjectName().getDomain())).count()); + } + + @Test + public void testEnabled() throws Exception { + String root1 = TestUtil.randomSimpleString(random(), 5, 10); + PluginInfo pluginInfo1 = createReporterPluginInfo(root1, true); + metricManager.loadReporter(coreMetricManager.getRegistryName(), coreMetricManager.getCore().getResourceLoader(), + pluginInfo1, coreMetricManager.getTag()); + + String root2 = TestUtil.randomSimpleString(random(), 5, 10); + assertFalse(root2.equals(root1)); + PluginInfo pluginInfo2 = createReporterPluginInfo(root2, false); + metricManager.loadReporter(coreMetricManager.getRegistryName(), coreMetricManager.getCore().getResourceLoader(), + pluginInfo2, coreMetricManager.getTag()); + + Map reporters = metricManager.getReporters(coreMetricManager.getRegistryName()); + assertTrue(reporters.containsKey(pluginInfo1.name + "@" + coreMetricManager.getTag())); + assertTrue(reporters.containsKey(pluginInfo2.name + "@" + coreMetricManager.getTag())); + + String scope = SolrMetricTestUtils.getRandomScope(random(), true); + SolrInfoBean.Category category = SolrMetricTestUtils.getRandomCategory(random(), true); + Map metrics = SolrMetricTestUtils.getRandomMetrics(random(), true); + SolrMetricProducer producer = SolrMetricTestUtils.getProducerOf(metricManager, category, scope, metrics); + coreMetricManager.registerMetricProducer(scope, producer); + Set objects = mBeanServer.queryMBeans(null, null); + assertEquals(metrics.size(), objects.stream(). + filter(o -> scope.equals(o.getObjectName().getKeyProperty("scope")) && + root1.equals(o.getObjectName().getDomain())).count()); + assertEquals(0, objects.stream(). 
+ filter(o -> scope.equals(o.getObjectName().getKeyProperty("scope")) && + root2.equals(o.getObjectName().getDomain())).count()); } } diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java index 47bf8e7216f0..a8f33437913f 100644 --- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java @@ -57,7 +57,7 @@ public void testReporter() throws Exception { h.coreName = DEFAULT_TEST_CORENAME; SolrMetricManager metricManager = cc.getMetricManager(); Map reporters = metricManager.getReporters("solr.node"); - assertEquals(2, reporters.size()); + assertTrue(reporters.toString(), reporters.size() >= 2); SolrMetricReporter reporter = reporters.get("test1"); assertNotNull(reporter); assertTrue(reporter instanceof SolrSlf4jReporter); diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrCloudReportersTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrCloudReportersTest.java index 91952b889d86..df7e6428b201 100644 --- a/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrCloudReportersTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrCloudReportersTest.java @@ -28,6 +28,7 @@ import org.apache.solr.metrics.AggregateMetric; import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricReporter; +import org.apache.solr.util.JmxUtil; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -38,11 +39,13 @@ public class SolrCloudReportersTest extends SolrCloudTestCase { int leaderRegistries; int clusterRegistries; + static int jmxReporter; @BeforeClass public static void configureDummyCluster() throws Exception { configureCluster(0).configure(); + jmxReporter = JmxUtil.findFirstMBeanServer() != null ? 
1 : 0; } @Before @@ -67,7 +70,7 @@ public void testExplicitConfiguration() throws Exception { cluster.getJettySolrRunners().forEach(jetty -> { CoreContainer cc = jetty.getCoreContainer(); // verify registry names - for (String name : cc.getCoreNames()) { + for (String name : cc.getLoadedCoreNames()) { SolrCore core = cc.getCore(name); try { String registryName = core.getCoreMetricManager().getRegistryName(); @@ -97,7 +100,7 @@ public void testExplicitConfiguration() throws Exception { assertEquals(5, sor.getPeriod()); for (String registryName : metricManager.registryNames(".*\\.shard[0-9]\\.replica.*")) { reporters = metricManager.getReporters(registryName); - assertEquals(reporters.toString(), 1, reporters.size()); + assertEquals(reporters.toString(), 1 + jmxReporter, reporters.size()); reporter = null; for (String name : reporters.keySet()) { if (name.startsWith("test")) { @@ -116,20 +119,20 @@ public void testExplicitConfiguration() throws Exception { assertEquals(reporters.toString(), 0, reporters.size()); // verify specific metrics Map metrics = metricManager.registry(registryName).getMetrics(); - String key = "QUERY./select.requests.count"; + String key = "QUERY./select.requests"; assertTrue(key, metrics.containsKey(key)); assertTrue(key, metrics.get(key) instanceof AggregateMetric); - key = "UPDATE./update/json.requests.count"; + key = "UPDATE./update/json.requests"; assertTrue(key, metrics.containsKey(key)); assertTrue(key, metrics.get(key) instanceof AggregateMetric); } if (metricManager.registryNames().contains("solr.cluster")) { clusterRegistries++; Map metrics = metricManager.registry("solr.cluster").getMetrics(); - String key = "jvm.memory.heap.init.value"; + String key = "jvm.memory.heap.init"; assertTrue(key, metrics.containsKey(key)); assertTrue(key, metrics.get(key) instanceof AggregateMetric); - key = "leader.test_collection.shard1.UPDATE./update/json.requests.count.max"; + key = "leader.test_collection.shard1.UPDATE./update/json.requests.max"; assertTrue(key, metrics.containsKey(key)); assertTrue(key, metrics.get(key) instanceof AggregateMetric); } @@ -156,7 +159,7 @@ public void testDefaultPlugins() throws Exception { assertEquals(reporters.toString(), 0, reporters.size()); for (String registryName : metricManager.registryNames(".*\\.shard[0-9]\\.replica.*")) { reporters = metricManager.getReporters(registryName); - assertEquals(reporters.toString(), 0, reporters.size()); + assertEquals(reporters.toString(), 0 + jmxReporter, reporters.size()); } }); } diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrShardReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrShardReporterTest.java index 9ce37627cde1..51c2a5ddd891 100644 --- a/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrShardReporterTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/reporters/solr/SolrShardReporterTest.java @@ -63,7 +63,7 @@ public void test() throws Exception { for (JettySolrRunner jetty : jettys) { CoreContainer cc = jetty.getCoreContainer(); SolrMetricManager metricManager = cc.getMetricManager(); - for (final String coreName : cc.getCoreNames()) { + for (final String coreName : cc.getLoadedCoreNames()) { CoreDescriptor cd = cc.getCoreDescriptor(coreName); if (cd.getCloudDescriptor() == null) { // not a cloud collection continue; diff --git a/solr/core/src/test/org/apache/solr/rest/schema/TestFieldResource.java b/solr/core/src/test/org/apache/solr/rest/schema/TestFieldResource.java index d591b9ad0261..4f53609c9e16 100644 
--- a/solr/core/src/test/org/apache/solr/rest/schema/TestFieldResource.java +++ b/solr/core/src/test/org/apache/solr/rest/schema/TestFieldResource.java @@ -23,7 +23,7 @@ public class TestFieldResource extends SolrRestletTestBase { public void testGetField() throws Exception { assertQ("/schema/fields/test_postv?indent=on&wt=xml&showDefaults=true", "count(/response/lst[@name='field']) = 1", - "count(/response/lst[@name='field']/*) = 17", + "count(/response/lst[@name='field']/*) = 18", "/response/lst[@name='field']/str[@name='name'] = 'test_postv'", "/response/lst[@name='field']/str[@name='type'] = 'text'", "/response/lst[@name='field']/bool[@name='indexed'] = 'true'", @@ -38,6 +38,7 @@ public void testGetField() throws Exception { "/response/lst[@name='field']/bool[@name='omitPositions'] = 'false'", "/response/lst[@name='field']/bool[@name='storeOffsetsWithPositions'] = 'false'", "/response/lst[@name='field']/bool[@name='multiValued'] = 'false'", + "/response/lst[@name='field']/bool[@name='large'] = 'false'", "/response/lst[@name='field']/bool[@name='required'] = 'false'", "/response/lst[@name='field']/bool[@name='tokenized'] = 'true'", "/response/lst[@name='field']/bool[@name='useDocValuesAsStored'] = 'true'"); diff --git a/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java b/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java index 93cbb0a4ae17..d95f144b27d1 100644 --- a/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java +++ b/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java @@ -124,7 +124,7 @@ public void testGetMaxChars() { public void testCopyFieldFunctionality() { SolrCore core = h.getCore(); - assertU(adoc("id", "10", "title", "test copy field", "text_en", "this is a simple test of the copy field functionality")); + assertU(adoc("id", "5", "title", "test copy field", "text_en", "this is a simple test of the copy field functionality")); assertU(commit()); Map args = new HashMap<>(); @@ -134,7 +134,7 @@ public void testCopyFieldFunctionality() assertQ("Make sure they got in", req ,"//*[@numFound='1']" - ,"//result/doc[1]/int[@name='id'][.='10']" + ,"//result/doc[1]/int[@name='id'][.='5']" ); args = new HashMap<>(); @@ -143,7 +143,7 @@ public void testCopyFieldFunctionality() req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); assertQ("dynamic source", req ,"//*[@numFound='1']" - ,"//result/doc[1]/int[@name='id'][.='10']" + ,"//result/doc[1]/int[@name='id'][.='5']" ,"//result/doc[1]/arr[@name='highlight']/str[.='this is a simple test of ']" ); diff --git a/solr/core/src/test/org/apache/solr/schema/TestHalfAndHalfDocValues.java b/solr/core/src/test/org/apache/solr/schema/TestHalfAndHalfDocValues.java new file mode 100644 index 000000000000..b0c395662177 --- /dev/null +++ b/solr/core/src/test/org/apache/solr/schema/TestHalfAndHalfDocValues.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.schema; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.MultiFields; +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.core.SolrCore; +import org.apache.solr.index.NoMergePolicyFactory; +import org.apache.solr.search.SolrIndexSearcher; +import org.apache.solr.util.RefCounted; +import org.junit.BeforeClass; + +/** + * Added in SOLR-10047 + */ +public class TestHalfAndHalfDocValues extends SolrTestCaseJ4 { + + @BeforeClass + public static void beforeTests() throws Exception { + // we need consistent segments that aren't merged because we want to have + // segments with and without docvalues + systemSetPropertySolrTestsMergePolicyFactory(NoMergePolicyFactory.class.getName()); + + // HACK: Don't use a RandomMergePolicy, but only use the mergePolicyFactory that we've just set + System.setProperty(SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICYFACTORY, "true"); + System.setProperty(SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICY, "false"); + + initCore("solrconfig-basic.xml", "schema-docValues.xml"); + + // sanity check our schema meets our expectations + final IndexSchema schema = h.getCore().getLatestSchema(); + for (String f : new String[]{"floatdv", "intdv", "doubledv", "longdv", "datedv", "stringdv", "booldv"}) { + final SchemaField sf = schema.getField(f); + assertFalse(f + " is multiValued, test is useless, who changed the schema?", + sf.multiValued()); + assertFalse(f + " is indexed, test is useless, who changed the schema?", + sf.indexed()); + assertTrue(f + " has no docValues, test is useless, who changed the schema?", + sf.hasDocValues()); + } + } + + public void setUp() throws Exception { + super.setUp(); + assertU(delQ("*:*")); + } + + public void testHalfAndHalfDocValues() throws Exception { + // Insert two docs without docvalues + String fieldname = "string_add_dv_later"; + assertU(adoc("id", "3", fieldname, "c")); + assertU(commit()); + assertU(adoc("id", "1", fieldname, "a")); + assertU(commit()); + + + try (SolrCore core = h.getCoreInc()) { + assertFalse(core.getLatestSchema().getField(fieldname).hasDocValues()); + // Add docvalues to the field type + IndexSchema schema = core.getLatestSchema(); + SchemaField oldField = schema.getField(fieldname); + int newProperties = oldField.getProperties() | SchemaField.DOC_VALUES; + + SchemaField sf = new SchemaField(fieldname, oldField.getType(), newProperties, null); + schema.getFields().put(fieldname, sf); + + // Insert a new doc with docvalues + assertU(adoc("id", "2", fieldname, "b")); + assertU(commit()); + + + // Check there are a mix of segments with and without docvalues + final RefCounted searcherRef = core.openNewSearcher(true, true); + final SolrIndexSearcher searcher = searcherRef.get(); + try { + final DirectoryReader topReader = searcher.getRawReader(); + + //Assert no merges + + assertEquals(3, topReader.numDocs()); 
+ assertEquals(3, topReader.leaves().size()); + + final FieldInfos infos = MultiFields.getMergedFieldInfos(topReader); + //The global field type should have docValues because a document with dvs was added + assertEquals(DocValuesType.SORTED, infos.fieldInfo(fieldname).getDocValuesType()); + + for (LeafReaderContext ctx : topReader.leaves()) { + LeafReader r = ctx.reader(); + //Make sure there were no merges + assertEquals(1, r.numDocs()); + Document doc = r.document(0); + String id = doc.getField("id").stringValue(); + + if (id.equals("1") || id.equals("3")) { + assertEquals(DocValuesType.NONE, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType()); + } else { + assertEquals(DocValuesType.SORTED, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType()); + } + + } + } finally { + searcherRef.decref(); + } + } + + // Assert sort order is correct + assertQ(req("q", "string_add_dv_later:*", "sort", "string_add_dv_later asc"), + "//*[@numFound='3']", + "//result/doc[1]/int[@name='id'][.=1]", + "//result/doc[2]/int[@name='id'][.=2]", + "//result/doc[3]/int[@name='id'][.=3]" + ); + } + +} diff --git a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java index de458445fe4d..02d2ac24ae76 100644 --- a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java +++ b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java @@ -17,6 +17,8 @@ package org.apache.solr.schema; import java.io.IOException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; @@ -25,6 +27,7 @@ import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.SortedSet; import java.util.TreeSet; import org.apache.lucene.document.Document; @@ -47,8 +50,11 @@ import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; +import org.apache.solr.common.params.CommonParams; +import org.apache.solr.index.SlowCompositeReaderWrapper; import org.apache.solr.schema.IndexSchema.DynamicField; import org.apache.solr.search.SolrIndexSearcher; +import org.apache.solr.search.SolrQueryParser; import org.apache.solr.util.DateMathParser; import org.apache.solr.util.RefCounted; import org.junit.After; @@ -57,7 +63,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import com.ibm.icu.text.SimpleDateFormat; /** * Tests for PointField functionality @@ -118,8 +123,36 @@ public void testIntPointFieldNonSearchableRangeQuery() throws Exception { } @Test - public void testIntPointFieldSort() throws Exception { - doTestPointFieldSort("number_p_i", "number_p_i_dv", new String[]{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}); + public void testIntPointFieldSortAndFunction() throws Exception { + + final SortedSet regexToTest = dynFieldRegexesForType(IntPointField.class); + final String[] sequential = new String[]{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}; + + for (String r : Arrays.asList("*_p_i", "*_p_i_dv", "*_p_i_dv_ns", "*_p_i_ni_dv", + "*_p_i_ni_dv_ns", "*_p_i_ni_ns_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSort(r.replace("*","number"), sequential); + // TODO: test some randomly generated (then sorted) arrays (with dups and/or missing values) + + doTestIntPointFunctionQuery(r.replace("*","number"), "int"); + } + + for (String r : Arrays.asList("*_p_i_ni", "*_p_i_ni_ns")) { + assertTrue(r, 
regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "w/o docValues", "42"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "w/o docValues", "42"); + } + + for (String r : Arrays.asList("*_p_i_mv", "*_p_i_ni_mv", "*_p_i_ni_mv_dv", "*_p_i_ni_dv_ns_mv", + "*_p_i_ni_ns_mv", "*_p_i_dv_ns_mv", "*_p_i_mv_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "42"); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "42", "666"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "42"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "42", "666"); + } + + assertEquals("Missing types in the test", Collections.emptySet(), regexToTest); } @Test @@ -131,13 +164,6 @@ public void testIntPointFieldFacetField() throws Exception { public void testIntPointFieldRangeFacet() throws Exception { doTestIntPointFieldRangeFacet("number_p_i_dv", "number_p_i"); } - - - @Test - public void testIntPointFunctionQuery() throws Exception { - doTestIntPointFunctionQuery("number_p_i_dv", "number_p_i", "int"); - } - @Test public void testIntPointStats() throws Exception { @@ -218,9 +244,9 @@ public void testMultiValuedIntPointFieldsAtomicUpdates() throws Exception { @Test public void testIntPointSetQuery() throws Exception { - doTestSetQueries("number_p_i", getRandomStringArrayWithInts(10, false), false); - doTestSetQueries("number_p_i_mv", getRandomStringArrayWithInts(10, false), true); - doTestSetQueries("number_p_i_ni_dv", getRandomStringArrayWithInts(10, false), false); + doTestSetQueries("number_p_i", getRandomStringArrayWithInts(20, false), false); + doTestSetQueries("number_p_i_mv", getRandomStringArrayWithInts(20, false), true); + doTestSetQueries("number_p_i_ni_dv", getRandomStringArrayWithInts(20, false), false); } // DoublePointField @@ -272,9 +298,40 @@ public void testDoubleFieldNonSearchableRangeQuery() throws Exception { @Test - public void testDoublePointFieldSort() throws Exception { - String[] arr = getRandomStringArrayWithDoubles(10, true); - doTestPointFieldSort("number_p_d", "number_p_d_dv", arr); + public void testDoublePointFieldSortAndFunction() throws Exception { + final SortedSet regexToTest = dynFieldRegexesForType(DoublePointField.class); + final String[] sequential = new String[]{"0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0"}; + final String[] randstrs = getRandomStringArrayWithDoubles(10, true); + + for (String r : Arrays.asList("*_p_d", "*_p_d_dv", "*_p_d_dv_ns", "*_p_d_ni_dv", + "*_p_d_ni_dv_ns", "*_p_d_ni_ns_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSort(r.replace("*","number"), sequential); + doTestPointFieldSort(r.replace("*","number"), randstrs); + // TODO: test some randomly generated (then sorted) arrays (with dups and/or missing values) + + doTestFloatPointFunctionQuery(r.replace("*","number"), "double"); + } + + for (String r : Arrays.asList("*_p_d_ni", "*_p_d_ni_ns")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "w/o docValues", "42.34"); + + doTestPointFieldFunctionQueryError(r.replace("*","number"), "w/o docValues", "42.34"); + } + + for (String r : Arrays.asList("*_p_d_mv", "*_p_d_ni_mv", "*_p_d_ni_mv_dv", "*_p_d_ni_dv_ns_mv", + "*_p_d_ni_ns_mv", "*_p_d_dv_ns_mv", "*_p_d_mv_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "42.34"); + 
doTestPointFieldSortError(r.replace("*","number"), "multivalued", "42.34", "66.6"); + + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "42.34"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "42.34", "66.6"); + } + + assertEquals("Missing types in the test", Collections.emptySet(), regexToTest); + } @Test @@ -290,11 +347,6 @@ public void testDoublePointFieldRangeFacet() throws Exception { doTestFloatPointFieldRangeFacet("number_p_d_dv", "number_p_d"); } - @Test - public void testDoublePointFunctionQuery() throws Exception { - doTestFloatPointFunctionQuery("number_p_d_dv", "number_p_d", "double"); - } - @Test public void testDoublePointStats() throws Exception { testPointStats("number_p_d", "number_p_d_dv", new String[]{"-10.0", "1.1", "2.2", "3.3", "4.4", "5.5", "6.6", "7.7", "8.8", "9.9"}, @@ -408,9 +460,9 @@ private void doTestFloatPointFieldsAtomicUpdates(String field, String type) thro @Test public void testDoublePointSetQuery() throws Exception { - doTestSetQueries("number_p_d", getRandomStringArrayWithDoubles(10, false), false); - doTestSetQueries("number_p_d_mv", getRandomStringArrayWithDoubles(10, false), true); - doTestSetQueries("number_p_d_ni_dv", getRandomStringArrayWithDoubles(10, false), false); + doTestSetQueries("number_p_d", getRandomStringArrayWithDoubles(20, false), false); + doTestSetQueries("number_p_d_mv", getRandomStringArrayWithDoubles(20, false), true); + doTestSetQueries("number_p_d_ni_dv", getRandomStringArrayWithDoubles(20, false), false); } // Float @@ -460,9 +512,40 @@ public void testFloatPointFieldNonSearchableRangeQuery() throws Exception { } @Test - public void testFloatPointFieldSort() throws Exception { - String[] arr = getRandomStringArrayWithFloats(10, true); - doTestPointFieldSort("number_p_f", "number_p_f_dv", arr); + public void testFloatPointFieldSortAndFunction() throws Exception { + final SortedSet regexToTest = dynFieldRegexesForType(FloatPointField.class); + final String[] sequential = new String[]{"0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0"}; + final String[] randstrs = getRandomStringArrayWithFloats(10, true); + + for (String r : Arrays.asList("*_p_f", "*_p_f_dv", "*_p_f_dv_ns", "*_p_f_ni_dv", + "*_p_f_ni_dv_ns", "*_p_f_ni_ns_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSort(r.replace("*","number"), sequential); + doTestPointFieldSort(r.replace("*","number"), randstrs); + // TODO: test some randomly generated (then sorted) arrays (with dups and/or missing values) + + doTestFloatPointFunctionQuery(r.replace("*","number"), "float"); + } + + for (String r : Arrays.asList("*_p_f_ni", "*_p_f_ni_ns")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "w/o docValues", "42.34"); + + doTestPointFieldFunctionQueryError(r.replace("*","number"), "w/o docValues", "42.34"); + } + + for (String r : Arrays.asList("*_p_f_mv", "*_p_f_ni_mv", "*_p_f_ni_mv_dv", "*_p_f_ni_dv_ns_mv", + "*_p_f_ni_ns_mv", "*_p_f_dv_ns_mv", "*_p_f_mv_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "42.34"); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "42.34", "66.6"); + + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "42.34"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "42.34", "66.6"); + } + + assertEquals("Missing types in the test", Collections.emptySet(), regexToTest); + } @Test 
@@ -478,11 +561,6 @@ public void testFloatPointFieldRangeFacet() throws Exception { doTestFloatPointFieldRangeFacet("number_p_f_dv", "number_p_f"); } - @Test - public void testFloatPointFunctionQuery() throws Exception { - doTestFloatPointFunctionQuery("number_p_f_dv", "number_p_f", "float"); - } - @Test public void testFloatPointStats() throws Exception { testPointStats("number_p_f", "number_p_f_dv", new String[]{"-10.0", "1.1", "2.2", "3.3", "4.4", "5.5", "6.6", "7.7", "8.8", "9.9"}, @@ -557,9 +635,9 @@ public void testMultiValuedFloatePointFieldsAtomicUpdates() throws Exception { @Test public void testFloatPointSetQuery() throws Exception { - doTestSetQueries("number_p_f", getRandomStringArrayWithFloats(10, false), false); - doTestSetQueries("number_p_f_mv", getRandomStringArrayWithFloats(10, false), true); - doTestSetQueries("number_p_f_ni_dv", getRandomStringArrayWithFloats(10, false), false); + doTestSetQueries("number_p_f", getRandomStringArrayWithFloats(20, false), false); + doTestSetQueries("number_p_f_mv", getRandomStringArrayWithFloats(20, false), true); + doTestSetQueries("number_p_f_ni_dv", getRandomStringArrayWithFloats(20, false), false); } @Test @@ -609,10 +687,38 @@ public void testLongPointFieldNonSearchableRangeQuery() throws Exception { } @Test - public void testLongPointFieldSort() throws Exception { - doTestPointFieldSort("number_p_l", "number_p_l_dv", new String[]{String.valueOf(Integer.MIN_VALUE), - "1", "2", "3", "4", "5", "6", "7", - String.valueOf(Integer.MAX_VALUE), String.valueOf(Long.MAX_VALUE)}); + public void testLongPointFieldSortAndFunction() throws Exception { + final SortedSet regexToTest = dynFieldRegexesForType(LongPointField.class); + final String[] vals = new String[]{ String.valueOf(Integer.MIN_VALUE), + "1", "2", "3", "4", "5", "6", "7", + String.valueOf(Integer.MAX_VALUE), String.valueOf(Long.MAX_VALUE)}; + + for (String r : Arrays.asList("*_p_l", "*_p_l_dv", "*_p_l_dv_ns", "*_p_l_ni_dv", + "*_p_l_ni_dv_ns", "*_p_l_ni_ns_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSort(r.replace("*","number"), vals); + // TODO: test some randomly generated (then sorted) arrays (with dups and/or missing values) + + doTestIntPointFunctionQuery(r.replace("*","number"), "long"); + } + + for (String r : Arrays.asList("*_p_l_ni", "*_p_l_ni_ns")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "w/o docValues", "4234"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "w/o docValues", "4234"); + } + + for (String r : Arrays.asList("*_p_l_mv", "*_p_l_ni_mv", "*_p_l_ni_mv_dv", "*_p_l_ni_dv_ns_mv", + "*_p_l_ni_ns_mv", "*_p_l_dv_ns_mv", "*_p_l_mv_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "4234"); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "4234", "66666666"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "4234"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "4234", "66666666"); + } + + assertEquals("Missing types in the test", Collections.emptySet(), regexToTest); + } @Test @@ -628,11 +734,6 @@ public void testLongPointFieldRangeFacet() throws Exception { doTestIntPointFieldRangeFacet("number_p_l_dv", "number_p_l"); } - @Test - public void testLongPointFunctionQuery() throws Exception { - doTestIntPointFunctionQuery("number_p_l_dv", "number_p_l", "long"); - } - @Test public void testLongPointStats() throws Exception { 
testPointStats("number_p_l", "number_p_l_dv", new String[]{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}, @@ -705,9 +806,9 @@ public void testMultiValuedLongPointFieldsAtomicUpdates() throws Exception { @Test public void testLongPointSetQuery() throws Exception { - doTestSetQueries("number_p_l", getRandomStringArrayWithLongs(10, false), false); - doTestSetQueries("number_p_l_mv", getRandomStringArrayWithLongs(10, false), true); - doTestSetQueries("number_p_l_ni_dv", getRandomStringArrayWithLongs(10, false), false); + doTestSetQueries("number_p_l", getRandomStringArrayWithLongs(20, false), false); + doTestSetQueries("number_p_l_mv", getRandomStringArrayWithLongs(20, false), true); + doTestSetQueries("number_p_l_ni_dv", getRandomStringArrayWithLongs(20, false), false); } @Test @@ -759,8 +860,39 @@ public void testDatePointFieldNonSearchableRangeQuery() throws Exception { } @Test - public void testDatePointFieldSort() throws Exception { - doTestPointFieldSort("number_p_dt", "number_p_dt_dv", getSequentialStringArrayWithDates(10)); + public void testDatePointFieldSortAndFunction() throws Exception { + final SortedSet regexToTest = dynFieldRegexesForType(DatePointField.class); + final String[] sequential = getSequentialStringArrayWithDates(10); + + for (String r : Arrays.asList("*_p_dt", "*_p_dt_dv", "*_p_dt_dv_ns", "*_p_dt_ni_dv", + "*_p_dt_ni_dv_ns", "*_p_dt_ni_ns_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSort(r.replace("*","number"), sequential); + // TODO: test some randomly generated (then sorted) arrays (with dups and/or missing values) + + doTestDatePointFunctionQuery(r.replace("*","number"), "date"); + } + + for (String r : Arrays.asList("*_p_dt_ni", "*_p_dt_ni_ns")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "w/o docValues", "1995-12-31T23:59:59Z"); + + doTestPointFieldFunctionQueryError(r.replace("*","number"), "w/o docValues", "1995-12-31T23:59:59Z"); + } + + for (String r : Arrays.asList("*_p_dt_mv", "*_p_dt_ni_mv", "*_p_dt_ni_mv_dv", "*_p_dt_ni_dv_ns_mv", + "*_p_dt_ni_ns_mv", "*_p_dt_dv_ns_mv", "*_p_dt_mv_dv")) { + assertTrue(r, regexToTest.remove(r)); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "1995-12-31T23:59:59Z"); + doTestPointFieldSortError(r.replace("*","number"), "multivalued", "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z"); + + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "1995-12-31T23:59:59Z"); + doTestPointFieldFunctionQueryError(r.replace("*","number"), "multivalued", "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z"); + + } + + assertEquals("Missing types in the test", Collections.emptySet(), regexToTest); + } @Test @@ -776,11 +908,6 @@ public void testDatePointFieldRangeFacet() throws Exception { doTestDatePointFieldRangeFacet("number_p_dt_dv", "number_p_dt"); } - @Test - public void testDatePointFunctionQuery() throws Exception { - doTestDatePointFunctionQuery("number_p_dt_dv", "number_p_dt", "date"); - } - @Test public void testDatePointStats() throws Exception { testDatePointStats("number_p_dt", "number_p_dt_dv", getSequentialStringArrayWithDates(10)); @@ -850,9 +977,9 @@ public void testMultiValuedDatePointFieldsAtomicUpdates() throws Exception { @Test public void testDatePointSetQuery() throws Exception { - doTestSetQueries("number_p_dt", getRandomStringArrayWithDates(10, false), false); - doTestSetQueries("number_p_dt_mv", getRandomStringArrayWithDates(10, false), true); - doTestSetQueries("number_p_dt_ni_dv", 
getRandomStringArrayWithDates(10, false), false); + doTestSetQueries("number_p_dt", getRandomStringArrayWithDates(20, false), false); + doTestSetQueries("number_p_dt_mv", getRandomStringArrayWithDates(20, false), true); + doTestSetQueries("number_p_dt_ni_dv", getRandomStringArrayWithDates(20, false), false); } @@ -885,7 +1012,7 @@ public void testIndexOrDocValuesQuery() throws Exception { public void testInternals() throws IOException { String[] types = new String[]{"i", "l", "f", "d"}; - String[] suffixes = new String[]{"", "_dv", "_mv", "_mv_dv", "_ni", "_ni_dv", "_ni_dv_ns", "_ni_mv", "_ni_mv_dv", "_ni_ns", "_ni_ns_mv", "_dv_ns", "_ni_ns_dv", "_dv_ns_mv"}; + String[] suffixes = new String[]{"", "_dv", "_mv", "_mv_dv", "_ni", "_ni_dv", "_ni_dv_ns", "_ni_dv_ns_mv", "_ni_mv", "_ni_mv_dv", "_ni_ns", "_ni_ns_mv", "_dv_ns", "_ni_ns_dv", "_dv_ns_mv"}; Set typesTested = new HashSet<>(); for (String type:types) { for (String suffix:suffixes) { @@ -897,18 +1024,29 @@ public void testInternals() throws IOException { doTestInternals("number_p_dt" + suffix, getSequentialStringArrayWithDates(10)); typesTested.add("*_p_dt" + suffix); } - - Set typesToTest = new HashSet<>(); - for (DynamicField dynField:h.getCore().getLatestSchema().getDynamicFields()) { - if (dynField.getPrototype().getType() instanceof PointField) { + + assertEquals("Missing types in the test", dynFieldRegexesForType(PointField.class), typesTested); + } + + // Helper methods + + /** + * Given a FieldType, return the list of DynamicField 'regexes' for all declared + * DynamicFields that use that FieldType. + * + * @see IndexSchema#getDynamicFields + * @see DynamicField#getRegex + */ + private static SortedSet dynFieldRegexesForType(final Class clazz) { + SortedSet typesToTest = new TreeSet<>(); + for (DynamicField dynField : h.getCore().getLatestSchema().getDynamicFields()) { + if (clazz.isInstance(dynField.getPrototype().getType())) { typesToTest.add(dynField.getRegex()); } } - assertEquals("Missing types in the test", typesTested, typesToTest); + return typesToTest; } - // Helper methods - private String[] getRandomStringArrayWithDoubles(int length, boolean sorted) { Set set; if (sorted) { @@ -1139,8 +1277,8 @@ private void testPointFieldReturn(String field, String type, String[] values) th assertU(commit()); String[] expected = new String[values.length + 1]; expected[0] = "//*[@numFound='" + values.length + "']"; - for (int i = 1; i <= values.length; i++) { - expected[i] = "//result/doc[" + i + "]/" + type + "[@name='" + field + "'][.='" + values[i-1] + "']"; + for (int i = 0; i < values.length; i++) { + expected[i + 1] = "//result/doc[str[@name='id']='" + i + "']/" + type + "[@name='" + field + "'][.='" + values[i] + "']"; } assertQ(req("q", "*:*", "fl", "id, " + field, "rows", String.valueOf(values.length)), expected); @@ -1348,43 +1486,69 @@ private void doTestIntPointFieldRangeFacet(String docValuesField, String nonDocV "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']", "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']"); } - - private void doTestIntPointFunctionQuery(String dvFieldName, String nonDvFieldName, String type) throws Exception { + + private void doTestIntPointFunctionQuery(String field, String type) throws Exception { for (int i = 9; i >= 0; i--) { - assertU(adoc("id", String.valueOf(i), dvFieldName, String.valueOf(i), nonDvFieldName, 
String.valueOf(i))); + assertU(adoc("id", String.valueOf(i), field, String.valueOf(i))); } assertU(commit()); - assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).getType() instanceof PointField); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName, "sort", "product(-1," + dvFieldName + ") asc"), + assertTrue(h.getCore().getLatestSchema().getField(field).getType() instanceof PointField); + + assertQ(req("q", "*:*", "fl", "id, " + field, "sort", "product(-1," + field + ") asc"), "//*[@numFound='10']", - "//result/doc[1]/" + type + "[@name='" + dvFieldName + "'][.='9']", - "//result/doc[2]/" + type + "[@name='" + dvFieldName + "'][.='8']", - "//result/doc[3]/" + type + "[@name='" + dvFieldName + "'][.='7']", - "//result/doc[10]/" + type + "[@name='" + dvFieldName + "'][.='0']"); + "//result/doc[1]/" + type + "[@name='" + field + "'][.='9']", + "//result/doc[2]/" + type + "[@name='" + field + "'][.='8']", + "//result/doc[3]/" + type + "[@name='" + field + "'][.='7']", + "//result/doc[10]/" + type + "[@name='" + field + "'][.='0']"); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", product(-1," + dvFieldName + ")", "sort", "id asc"), + assertQ(req("q", "*:*", "fl", "id, " + field + ", product(-1," + field + ")", "sort", "id asc"), "//*[@numFound='10']", - "//result/doc[1]/float[@name='product(-1," + dvFieldName + ")'][.='-0.0']", - "//result/doc[2]/float[@name='product(-1," + dvFieldName + ")'][.='-1.0']", - "//result/doc[3]/float[@name='product(-1," + dvFieldName + ")'][.='-2.0']", - "//result/doc[10]/float[@name='product(-1," + dvFieldName + ")'][.='-9.0']"); + "//result/doc[1]/float[@name='product(-1," + field + ")'][.='-0.0']", + "//result/doc[2]/float[@name='product(-1," + field + ")'][.='-1.0']", + "//result/doc[3]/float[@name='product(-1," + field + ")'][.='-2.0']", + "//result/doc[10]/float[@name='product(-1," + field + ")'][.='-9.0']"); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", field(" + dvFieldName + ")", "sort", "id asc"), + assertQ(req("q", "*:*", "fl", "id, " + field + ", field(" + field + ")", "sort", "id asc"), "//*[@numFound='10']", - "//result/doc[1]/" + type + "[@name='field(" + dvFieldName + ")'][.='0']", - "//result/doc[2]/" + type + "[@name='field(" + dvFieldName + ")'][.='1']", - "//result/doc[3]/" + type + "[@name='field(" + dvFieldName + ")'][.='2']", - "//result/doc[10]/" + type + "[@name='field(" + dvFieldName + ")'][.='9']"); + "//result/doc[1]/" + type + "[@name='field(" + field + ")'][.='0']", + "//result/doc[2]/" + type + "[@name='field(" + field + ")'][.='1']", + "//result/doc[3]/" + type + "[@name='field(" + field + ")'][.='2']", + "//result/doc[10]/" + type + "[@name='field(" + field + ")'][.='9']"); - assertFalse(h.getCore().getLatestSchema().getField(nonDvFieldName).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(nonDvFieldName).getType() instanceof PointField); + } - assertQEx("Expecting Exception", - "sort param could not be parsed as a query", - req("q", "*:*", "fl", "id, " + nonDvFieldName, "sort", "product(-1," + nonDvFieldName + ") asc"), - SolrException.ErrorCode.BAD_REQUEST); + /** + * Checks that the specified field can not be used as a value source, even if there are documents + * with (all) the specified values in the index. 
+ * + * @param field the field name to try and sort on + * @param errSubStr substring to look for in the error msg + * @param values one or more values to put into the doc(s) in the index - may be more than one for multivalued fields + */ + private void doTestPointFieldFunctionQueryError(String field, String errSubStr, String...values) throws Exception { + final int numDocs = atLeast(random(), 10); + for (int i = 0; i < numDocs; i++) { + SolrInputDocument doc = sdoc("id", String.valueOf(i)); + for (String v: values) { + doc.addField(field, v); + } + assertU(adoc(doc)); + } + + assertQEx("Should not be able to use field in function: " + field, errSubStr, + req("q", "*:*", "fl", "id", "fq", "{!frange l=0 h=100}product(-1, " + field + ")"), + SolrException.ErrorCode.BAD_REQUEST); + + clearIndex(); + assertU(commit()); + + // empty index should (also) give same error + assertQEx("Should not be able to use field in function: " + field, errSubStr, + req("q", "*:*", "fl", "id", "fq", "{!frange l=0 h=100}product(-1, " + field + ")"), + SolrException.ErrorCode.BAD_REQUEST); + } + private void testPointStats(String field, String dvField, String[] numbers, double min, double max, String count, String missing, double delta) { String minMin = String.valueOf(min - Math.abs(delta*min)); @@ -1891,30 +2055,86 @@ private void doTestFloatPointFieldExactQuery(String field, final boolean searcha clearIndex(); assertU(commit()); } - - private void doTestPointFieldSort(String field, String dvField, String[] arr) throws Exception { - assert arr != null && arr.length == 10; - for (int i = arr.length-1; i >= 0; i--) { - assertU(adoc("id", String.valueOf(i), dvField, String.valueOf(arr[i]), field, String.valueOf(arr[i]))); + + /** + * For each value, creates a doc with that value in the specified field and then asserts that + * asc/desc sorts on that field succeed and that the docs are in the (relatively) expected order + * + * @param field name of field to sort on + * @param values list of values in ascending order + */ + private void doTestPointFieldSort(String field, String... values) throws Exception { + assert values != null && 2 <= values.length; + + // TODO: need to add sort missing coverage... + // + // idea: accept "null" as possible value for sort missing tests ? + // + // need to account for possibility that multiple nulls will be in non deterministic order + // always using secondary sort on id seems prudent ...
handles any "dups" in values[] + + final List docs = new ArrayList<>(values.length); + final String[] ascXpathChecks = new String[values.length + 1]; + final String[] descXpathChecks = new String[values.length + 1]; + ascXpathChecks[values.length] = "//*[@numFound='" + values.length + "']"; + descXpathChecks[values.length] = "//*[@numFound='" + values.length + "']"; + + for (int i = values.length-1; i >= 0; i--) { + docs.add(sdoc("id", String.valueOf(i), field, String.valueOf(values[i]))); + // reminder: xpath array indexes start at 1 + ascXpathChecks[i]= "//result/doc["+ (1 + i)+"]/str[@name='id'][.='"+i+"']"; + descXpathChecks[i]= "//result/doc["+ (values.length - i) +"]/str[@name='id'][.='"+i+"']"; + } + + // ensure doc add order doesn't affect results + Collections.shuffle(docs, random()); + for (SolrInputDocument doc : docs) { + assertU(adoc(doc)); } assertU(commit()); - assertTrue(h.getCore().getLatestSchema().getField(dvField).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(dvField).getType() instanceof PointField); - assertQ(req("q", "*:*", "fl", "id", "sort", dvField + " desc"), - "//*[@numFound='10']", - "//result/doc[1]/str[@name='id'][.='9']", - "//result/doc[2]/str[@name='id'][.='8']", - "//result/doc[3]/str[@name='id'][.='7']", - "//result/doc[10]/str[@name='id'][.='0']"); + + assertQ(req("q", "*:*", "fl", "id", "sort", field + " asc"), + ascXpathChecks); + assertQ(req("q", "*:*", "fl", "id", "sort", field + " desc"), + descXpathChecks); + + + clearIndex(); + assertU(commit()); + } + + + /** + * Checks that the specified field can not be sorted on, even if there are documents + * with (all) the specified values in the index. + * + * @param field the field name to try and sort on + * @param errSubStr substring to look for in the error msg + * @param values one or more values to put into the doc(s) in the index - may be more than one for multivalued fields + */ + private void doTestPointFieldSortError(String field, String errSubStr, String...
values) throws Exception { + + final int numDocs = atLeast(random(), 10); + for (int i = 0; i < numDocs; i++) { + SolrInputDocument doc = sdoc("id", String.valueOf(i)); + for (String v: values) { + doc.addField(field, v); + } + assertU(adoc(doc)); + } + + assertQEx("Should not be able to sort on field: " + field, errSubStr, + req("q", "*:*", "fl", "id", "sort", field + " desc"), + SolrException.ErrorCode.BAD_REQUEST); - assertFalse(h.getCore().getLatestSchema().getField(field).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(field).getType() instanceof PointField); - assertQEx("Expecting Exception", - "can not sort on a PointField without doc values: " + field, - req("q", "*:*", "fl", "id", "sort", field + " desc"), - SolrException.ErrorCode.BAD_REQUEST); + clearIndex(); + assertU(commit()); + + // empty index should (also) give same error + assertQEx("Should not be able to sort on field: " + field, errSubStr, + req("q", "*:*", "fl", "id", "sort", field + " desc"), + SolrException.ErrorCode.BAD_REQUEST); - //TODO: sort missing } private void doTestFloatPointFieldRangeQuery(String fieldName, String type, boolean testDouble) throws Exception { @@ -2044,42 +2264,33 @@ private void doTestFloatPointFieldRangeFacet(String docValuesField, String nonDo "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']", "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']"); } - - private void doTestFloatPointFunctionQuery(String dvFieldName, String nonDvFieldName, String type) throws Exception { + + private void doTestFloatPointFunctionQuery(String field, String type) throws Exception { for (int i = 9; i >= 0; i--) { - assertU(adoc("id", String.valueOf(i), dvFieldName, String.format(Locale.ROOT, "%f", (float)i*1.1), nonDvFieldName, String.format(Locale.ROOT, "%f", (float)i*1.1))); + assertU(adoc("id", String.valueOf(i), field, String.format(Locale.ROOT, "%f", (float)i*1.1))); } assertU(commit()); - assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).getType() instanceof PointField); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName, "sort", "product(-1," + dvFieldName + ") asc"), + assertTrue(h.getCore().getLatestSchema().getField(field).getType() instanceof PointField); + assertQ(req("q", "*:*", "fl", "id, " + field, "sort", "product(-1," + field + ") asc"), "//*[@numFound='10']", - "//result/doc[1]/" + type + "[@name='" + dvFieldName + "'][.='9.9']", - "//result/doc[2]/" + type + "[@name='" + dvFieldName + "'][.='8.8']", - "//result/doc[3]/" + type + "[@name='" + dvFieldName + "'][.='7.7']", - "//result/doc[10]/" + type + "[@name='" + dvFieldName + "'][.='0.0']"); + "//result/doc[1]/" + type + "[@name='" + field + "'][.='9.9']", + "//result/doc[2]/" + type + "[@name='" + field + "'][.='8.8']", + "//result/doc[3]/" + type + "[@name='" + field + "'][.='7.7']", + "//result/doc[10]/" + type + "[@name='" + field + "'][.='0.0']"); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", product(-1," + dvFieldName + ")", "sort", "id asc"), + assertQ(req("q", "*:*", "fl", "id, " + field + ", product(-1," + field + ")", "sort", "id asc"), "//*[@numFound='10']", - "//result/doc[1]/float[@name='product(-1," + dvFieldName + ")'][.='-0.0']", - "//result/doc[2]/float[@name='product(-1," + dvFieldName + ")'][.='-1.1']", - 
"//result/doc[3]/float[@name='product(-1," + dvFieldName + ")'][.='-2.2']", - "//result/doc[10]/float[@name='product(-1," + dvFieldName + ")'][.='-9.9']"); + "//result/doc[1]/float[@name='product(-1," + field + ")'][.='-0.0']", + "//result/doc[2]/float[@name='product(-1," + field + ")'][.='-1.1']", + "//result/doc[3]/float[@name='product(-1," + field + ")'][.='-2.2']", + "//result/doc[10]/float[@name='product(-1," + field + ")'][.='-9.9']"); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", field(" + dvFieldName + ")", "sort", "id asc"), + assertQ(req("q", "*:*", "fl", "id, " + field + ", field(" + field + ")", "sort", "id asc"), "//*[@numFound='10']", - "//result/doc[1]/" + type + "[@name='field(" + dvFieldName + ")'][.='0.0']", - "//result/doc[2]/" + type + "[@name='field(" + dvFieldName + ")'][.='1.1']", - "//result/doc[3]/" + type + "[@name='field(" + dvFieldName + ")'][.='2.2']", - "//result/doc[10]/" + type + "[@name='field(" + dvFieldName + ")'][.='9.9']"); - - assertFalse(h.getCore().getLatestSchema().getField(nonDvFieldName).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(nonDvFieldName).getType() instanceof PointField); - - assertQEx("Expecting Exception", - "sort param could not be parsed as a query", - req("q", "*:*", "fl", "id, " + nonDvFieldName, "sort", "product(-1," + nonDvFieldName + ") asc"), - SolrException.ErrorCode.BAD_REQUEST); + "//result/doc[1]/" + type + "[@name='field(" + field + ")'][.='0.0']", + "//result/doc[2]/" + type + "[@name='field(" + field + ")'][.='1.1']", + "//result/doc[3]/" + type + "[@name='field(" + field + ")'][.='2.2']", + "//result/doc[10]/" + type + "[@name='field(" + field + ")'][.='9.9']"); } private void doTestSetQueries(String fieldName, String[] values, boolean multiValued) { @@ -2087,7 +2298,8 @@ private void doTestSetQueries(String fieldName, String[] values, boolean multiVa assertU(adoc("id", String.valueOf(i), fieldName, values[i])); } assertU(commit()); - assertTrue(h.getCore().getLatestSchema().getField(fieldName).getType() instanceof PointField); + SchemaField sf = h.getCore().getLatestSchema().getField(fieldName); + assertTrue(sf.getType() instanceof PointField); for (int i = 0; i < values.length; i++) { assertQ(req("q", "{!term f='" + fieldName + "'}" + values[i], "fl", "id," + fieldName), @@ -2099,6 +2311,27 @@ private void doTestSetQueries(String fieldName, String[] values, boolean multiVa "//*[@numFound='2']"); } + assertTrue(values.length > SolrQueryParser.TERMS_QUERY_THRESHOLD); + int numTerms = SolrQueryParser.TERMS_QUERY_THRESHOLD + 1; + StringBuilder builder = new StringBuilder(fieldName + ":("); + for (int i = 0; i < numTerms; i++) { + if (sf.getType().getNumberType() == NumberType.DATE) { + builder.append(String.valueOf(values[i]).replace(":", "\\:") + ' '); + } else { + builder.append(String.valueOf(values[i]).replace("-", "\\-") + ' '); + } + } + builder.append(')'); + if (sf.indexed()) { // SolrQueryParser should also be generating a PointInSetQuery if indexed + assertQ(req(CommonParams.DEBUG, CommonParams.QUERY, "q", "*:*", "fq", builder.toString(), "fl", "id," + fieldName), + "//*[@numFound='" + numTerms + "']", + "//*[@name='parsed_filter_queries']/str[.='(" + getSetQueryToString(fieldName, values, numTerms) + ")']"); + } else { + // Won't use PointInSetQuery if the fiels is not indexed, but should match the same docs + assertQ(req(CommonParams.DEBUG, CommonParams.QUERY, "q", "*:*", "fq", builder.toString(), "fl", "id," + fieldName), + "//*[@numFound='" + numTerms + "']"); + } + if 
(multiValued) { clearIndex(); assertU(commit()); @@ -2118,6 +2351,11 @@ private void doTestSetQueries(String fieldName, String[] values, boolean multiVa } } + private String getSetQueryToString(String fieldName, String[] values, int numTerms) { + SchemaField sf = h.getCore().getLatestSchema().getField(fieldName); + return sf.getType().getSetQuery(null, sf, Arrays.asList(Arrays.copyOf(values, numTerms))).toString(); + } + private void doTestDoublePointFieldMultiValuedRangeFacet(String docValuesField, String nonDocValuesField) throws Exception { for (int i = 0; i < 10; i++) { assertU(adoc("id", String.valueOf(i), docValuesField, String.valueOf(i), docValuesField, String.valueOf(i + 10), @@ -2514,43 +2752,36 @@ private void doTestDatePointFieldMultiValuedRangeFacet(String docValuesField, St "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='1994-12-31T10:59:59Z'][.='0']"); } - private void doTestDatePointFunctionQuery(String dvFieldName, String nonDvFieldName, String type) throws Exception { - String baseDate = "1995-01-10T10:59:10Z"; + private void doTestDatePointFunctionQuery(String field, String nonDvFieldName) throws Exception { + final String baseDate = "1995-01-10T10:59:10Z"; + for (int i = 9; i >= 0; i--) { String date = String.format(Locale.ROOT, "%s+%dSECONDS", baseDate, i+1); - assertU(adoc("id", String.valueOf(i), dvFieldName, date, nonDvFieldName, date)); + assertU(adoc("id", String.valueOf(i), field, date)); } assertU(commit()); - assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).getType() instanceof PointField); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName, "sort", "product(-1,ms(" + dvFieldName + ")) asc"), + assertTrue(h.getCore().getLatestSchema().getField(field).getType() instanceof DatePointField); + assertQ(req("q", "*:*", "fl", "id, " + field, "sort", "product(-1,ms(" + field + ")) asc"), "//*[@numFound='10']", - "//result/doc[1]/" + type + "[@name='" + dvFieldName + "'][.='1995-01-10T10:59:20Z']", - "//result/doc[2]/" + type + "[@name='" + dvFieldName + "'][.='1995-01-10T10:59:19Z']", - "//result/doc[3]/" + type + "[@name='" + dvFieldName + "'][.='1995-01-10T10:59:18Z']", - "//result/doc[10]/" + type + "[@name='" + dvFieldName + "'][.='1995-01-10T10:59:11Z']"); + "//result/doc[1]/date[@name='" + field + "'][.='1995-01-10T10:59:20Z']", + "//result/doc[2]/date[@name='" + field + "'][.='1995-01-10T10:59:19Z']", + "//result/doc[3]/date[@name='" + field + "'][.='1995-01-10T10:59:18Z']", + "//result/doc[10]/date[@name='" + field + "'][.='1995-01-10T10:59:11Z']"); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", ms(" + dvFieldName + ","+baseDate+")", "sort", "id asc"), + assertQ(req("q", "*:*", "fl", "id, " + field + ", ms(" + field + ","+baseDate+")", "sort", "id asc"), "//*[@numFound='10']", - "//result/doc[1]/float[@name='ms(" + dvFieldName + "," + baseDate + ")'][.='1000.0']", - "//result/doc[2]/float[@name='ms(" + dvFieldName + "," + baseDate + ")'][.='2000.0']", - "//result/doc[3]/float[@name='ms(" + dvFieldName + "," + baseDate + ")'][.='3000.0']", - "//result/doc[10]/float[@name='ms(" + dvFieldName + "," + baseDate + ")'][.='10000.0']"); + "//result/doc[1]/float[@name='ms(" + field + "," + baseDate + ")'][.='1000.0']", + "//result/doc[2]/float[@name='ms(" + field + "," + baseDate + ")'][.='2000.0']", + "//result/doc[3]/float[@name='ms(" + field + "," + baseDate + ")'][.='3000.0']", + 
"//result/doc[10]/float[@name='ms(" + field + "," + baseDate + ")'][.='10000.0']"); - assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", field(" + dvFieldName + ")", "sort", "id asc"), + assertQ(req("q", "*:*", "fl", "id, " + field + ", field(" + field + ")", "sort", "id asc"), "//*[@numFound='10']", - "//result/doc[1]/" + type + "[@name='field(" + dvFieldName + ")'][.='1995-01-10T10:59:11Z']", - "//result/doc[2]/" + type + "[@name='field(" + dvFieldName + ")'][.='1995-01-10T10:59:12Z']", - "//result/doc[3]/" + type + "[@name='field(" + dvFieldName + ")'][.='1995-01-10T10:59:13Z']", - "//result/doc[10]/" + type + "[@name='field(" + dvFieldName + ")'][.='1995-01-10T10:59:20Z']"); + "//result/doc[1]/date[@name='field(" + field + ")'][.='1995-01-10T10:59:11Z']", + "//result/doc[2]/date[@name='field(" + field + ")'][.='1995-01-10T10:59:12Z']", + "//result/doc[3]/date[@name='field(" + field + ")'][.='1995-01-10T10:59:13Z']", + "//result/doc[10]/date[@name='field(" + field + ")'][.='1995-01-10T10:59:20Z']"); - assertFalse(h.getCore().getLatestSchema().getField(nonDvFieldName).hasDocValues()); - assertTrue(h.getCore().getLatestSchema().getField(nonDvFieldName).getType() instanceof PointField); - - assertQEx("Expecting Exception", - "sort param could not be parsed as a query", - req("q", "*:*", "fl", "id, " + nonDvFieldName, "sort", "product(-1," + nonDvFieldName + ") asc"), - SolrException.ErrorCode.BAD_REQUEST); } private void testDatePointStats(String field, String dvField, String[] dates) { @@ -2648,6 +2879,9 @@ private void doTestInternals(String field, String[] values) throws IOException { ref = h.getCore().getSearcher(); SolrIndexSearcher searcher = ref.get(); ir = searcher.getIndexReader(); + // our own SlowCompositeReader to check DocValues on disk w/o the UninvertingReader added by SolrIndexSearcher + final LeafReader leafReaderForCheckingDVs = SlowCompositeReaderWrapper.wrap(searcher.getRawReader()); + if (sf.indexed()) { assertEquals("Field " + field + " should have point values", 10, PointValues.size(ir, field)); } else { @@ -2655,35 +2889,35 @@ private void doTestInternals(String field, String[] values) throws IOException { } if (ignoredField) { assertTrue("Field " + field + " should not have docValues", - DocValues.getSortedNumeric(searcher.getSlowAtomicReader(), field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); + DocValues.getSortedNumeric(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); assertTrue("Field " + field + " should not have docValues", - DocValues.getNumeric(searcher.getSlowAtomicReader(), field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); + DocValues.getNumeric(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); assertTrue("Field " + field + " should not have docValues", - DocValues.getSorted(searcher.getSlowAtomicReader(), field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); + DocValues.getSorted(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); assertTrue("Field " + field + " should not have docValues", - DocValues.getBinary(searcher.getSlowAtomicReader(), field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); + DocValues.getBinary(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); } else { if (sf.hasDocValues()) { if (sf.multiValued()) { assertFalse("Field " + field + " should have docValues", - DocValues.getSortedNumeric(searcher.getSlowAtomicReader(), field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); + 
DocValues.getSortedNumeric(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); } else { assertFalse("Field " + field + " should have docValues", - DocValues.getNumeric(searcher.getSlowAtomicReader(), field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); + DocValues.getNumeric(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS); } } else { - expectThrows(IllegalStateException.class, ()->DocValues.getSortedNumeric(searcher.getSlowAtomicReader(), field)); - expectThrows(IllegalStateException.class, ()->DocValues.getNumeric(searcher.getSlowAtomicReader(), field)); + expectThrows(IllegalStateException.class, ()->DocValues.getSortedNumeric(leafReaderForCheckingDVs, field)); + expectThrows(IllegalStateException.class, ()->DocValues.getNumeric(leafReaderForCheckingDVs, field)); } - expectThrows(IllegalStateException.class, ()->DocValues.getSorted(searcher.getSlowAtomicReader(), field)); - expectThrows(IllegalStateException.class, ()->DocValues.getBinary(searcher.getSlowAtomicReader(), field)); + expectThrows(IllegalStateException.class, ()->DocValues.getSorted(leafReaderForCheckingDVs, field)); + expectThrows(IllegalStateException.class, ()->DocValues.getBinary(leafReaderForCheckingDVs, field)); } for (LeafReaderContext leave:ir.leaves()) { LeafReader reader = leave.reader(); for (int i = 0; i < reader.numDocs(); i++) { - Document doc = reader.document(i, Collections.singleton(field)); + Document doc = reader.document(i); if (sf.stored()) { - assertNotNull(doc.get(field)); + assertNotNull("Field " + field + " not found. Doc: " + doc, doc.get(field)); } else { assertNull(doc.get(field)); } @@ -2692,42 +2926,42 @@ private void doTestInternals(String field, String[] values) throws IOException { } finally { ref.decref(); } + clearIndex(); + assertU(commit()); } public void testNonReturnable() throws Exception { - doTestNonReturnable("foo_p_i_ni_ns", "42"); - doTestNonReturnable("foo_p_i_ni_ns_mv", "42", "666"); + doTestReturnNonStored("foo_p_i_ni_ns", false, "42"); + doTestReturnNonStored("foo_p_i_ni_dv_ns", true, "42"); + doTestReturnNonStored("foo_p_i_ni_ns_mv", false, "42", "666"); + doTestReturnNonStored("foo_p_i_ni_dv_ns_mv", true, "42", "666"); - doTestNonReturnable("foo_p_l_ni_ns", "3333333333"); - doTestNonReturnable("foo_p_l_ni_ns_mv", "3333333333", "-4444444444"); + doTestReturnNonStored("foo_p_l_ni_ns", false, "3333333333"); + doTestReturnNonStored("foo_p_l_ni_dv_ns", true, "3333333333"); + doTestReturnNonStored("foo_p_l_ni_ns_mv", false, "3333333333", "-4444444444"); + doTestReturnNonStored("foo_p_l_ni_dv_ns_mv", true, "3333333333", "-4444444444"); - doTestNonReturnable("foo_p_f_ni_ns", "42.3"); - doTestNonReturnable("foo_p_f_ni_ns_mv", "42.3", "-66.6"); + doTestReturnNonStored("foo_p_f_ni_ns", false, "42.3"); + doTestReturnNonStored("foo_p_f_ni_dv_ns", true, "42.3"); + doTestReturnNonStored("foo_p_f_ni_ns_mv", false, "42.3", "-66.6"); + doTestReturnNonStored("foo_p_f_ni_dv_ns_mv", true, "42.3", "-66.6"); - doTestNonReturnable("foo_p_d_ni_ns", "42.3"); - doTestNonReturnable("foo_p_d_ni_ns_mv", "42.3", "-66.6"); - - doTestNonReturnable("foo_p_dt_ni_ns", "1995-12-31T23:59:59Z"); - doTestNonReturnable("foo_p_dt_ni_ns_mv", "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z+3DAYS"); - - } + doTestReturnNonStored("foo_p_d_ni_ns", false, "42.3"); + doTestReturnNonStored("foo_p_d_ni_dv_ns", true, "42.3"); + doTestReturnNonStored("foo_p_d_ni_ns_mv", false, "42.3", "-66.6"); + doTestReturnNonStored("foo_p_d_ni_dv_ns_mv", true, "42.3", "-66.6"); - 
@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-10437") - public void testNonReturnableDocValues() throws Exception { - // TODO: once SOLR-10437 is resolved, this test method can be folded into testNonReturnable() - - // these fields are stored=false, docValues=true, useDocValuesAsStored=false and yet they are - // still returned and failing this test. - - doTestNonReturnable("foo_p_i_ni_dv_ns", "42"); - doTestNonReturnable("foo_p_l_ni_dv_ns", "3333333333"); - doTestNonReturnable("foo_p_f_ni_dv_ns", "42.3"); - doTestNonReturnable("foo_p_d_ni_dv_ns", "42.3"); - doTestNonReturnable("foo_p_dt_ni_dv_ns", "1995-12-31T23:59:59Z"); + doTestReturnNonStored("foo_p_dt_ni_ns", false, "1995-12-31T23:59:59Z"); + doTestReturnNonStored("foo_p_dt_ni_dv_ns", true, "1995-12-31T23:59:59Z"); + doTestReturnNonStored("foo_p_dt_ni_ns_mv", false, "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z+3DAYS"); + doTestReturnNonStored("foo_p_dt_ni_dv_ns_mv", true, "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z+3DAYS"); } - public void doTestNonReturnable(final String fieldName, final String... values) throws Exception { + public void doTestReturnNonStored(final String fieldName, boolean shouldReturnFieldIfRequested, final String... values) throws Exception { + final String RETURN_FIELD = "count(//doc/*[@name='" + fieldName + "'])=10"; + final String DONT_RETURN_FIELD = "count(//doc/*[@name='" + fieldName + "'])=0"; + assertFalse(h.getCore().getLatestSchema().getField(fieldName).stored()); for (int i=0; i < 10; i++) { SolrInputDocument doc = sdoc("id", String.valueOf(i)); for (String value : values) { @@ -2739,17 +2973,24 @@ public void doTestNonReturnable(final String fieldName, final String... values) assertQ(req("q", "*:*", "rows", "100", "fl", "id," + fieldName), "//*[@numFound='10']", "count(//doc)=10", // exactly 10 docs in response - "count(//doc/*)=10", // exactly 10 fields across all docs - "count(//doc/*[@name!='id'])=0"); // no field in any doc other then 'id' + (shouldReturnFieldIfRequested?RETURN_FIELD:DONT_RETURN_FIELD)); // no field in any doc other then 'id' + + assertQ(req("q", "*:*", "rows", "100", "fl", "*"), + "//*[@numFound='10']", + "count(//doc)=10", // exactly 10 docs in response + DONT_RETURN_FIELD); // no field in any doc other then 'id' + + assertQ(req("q", "*:*", "rows", "100"), + "//*[@numFound='10']", + "count(//doc)=10", // exactly 10 docs in response + DONT_RETURN_FIELD); // no field in any doc other then 'id' clearIndex(); assertU(commit()); } public void testWhiteboxCreateFields() throws Exception { - // TODO: we should have a "coverage" assert that we're looping over all the dynamic (point) fields in the schema - String[] typeNames = new String[]{"i", "l", "f", "d", "dt"}; - String[] suffixes = new String[]{"", "_dv", "_mv", "_mv_dv", "_ni", "_ni_dv", "_ni_dv_ns", "_ni_mv", "_ni_mv_dv", "_ni_ns", "_ni_ns_mv", "_dv_ns", "_ni_ns_dv", "_dv_ns_mv"}; + String[] suffixes = new String[]{"", "_dv", "_mv", "_mv_dv", "_ni", "_ni_dv", "_ni_dv_ns", "_ni_dv_ns_mv", "_ni_mv", "_ni_mv_dv", "_ni_ns", "_ni_ns_mv", "_dv_ns", "_ni_ns_dv", "_dv_ns_mv"}; Class[] expectedClasses = new Class[]{IntPoint.class, LongPoint.class, FloatPoint.class, DoublePoint.class, LongPoint.class}; Date dateToTest = new Date(); diff --git a/solr/core/src/test/org/apache/solr/schema/TestUseDocValuesAsStored.java b/solr/core/src/test/org/apache/solr/schema/TestUseDocValuesAsStored.java index 48a3f2266bc7..136d40ee24db 100644 --- a/solr/core/src/test/org/apache/solr/schema/TestUseDocValuesAsStored.java +++ 
b/solr/core/src/test/org/apache/solr/schema/TestUseDocValuesAsStored.java @@ -107,13 +107,14 @@ private void initManagedSchemaCore() throws Exception { } @After - private void afterClass() throws Exception { + private void afterTest() throws Exception { + clearIndex(); + commit(); deleteCore(); System.clearProperty("managed.schema.mutable"); System.clearProperty("enable.update.log"); } - public String getCoreName() { return "basic"; } @@ -225,7 +226,6 @@ private String[] nextValues(int arity, String valueType) throws Exception { @Test public void testMultipleSearchResults() throws Exception { - // Three documents with different numbers of values for a field assertU(adoc("id", "myid1", "test_is_dvo", "101", "test_is_dvo", "102", "test_is_dvo", "103")); assertU(adoc("id", "myid2", "test_is_dvo", "201", "test_is_dvo", "202")); @@ -250,6 +250,34 @@ public void testMultipleSearchResults() throws Exception { + "{'id':'myid6','test_s_dvo':'hello'}" + "]"); } + + @Test + public void testUseDocValuesAsStoredFalse() throws Exception { + SchemaField sf = h.getCore().getLatestSchema().getField("nonstored_dv_str"); + assertNotNull(sf); + assertTrue(sf.hasDocValues()); + assertFalse(sf.useDocValuesAsStored()); + assertFalse(sf.stored()); + assertU(adoc("id", "myid", "nonstored_dv_str", "dont see me")); + assertU(commit()); + + assertJQ(req("q", "id:myid"), + "/response/docs==[" + + "{'id':'myid'}" + + "]"); + assertJQ(req("q", "id:myid", "fl", "*"), + "/response/docs==[" + + "{'id':'myid'}" + + "]"); + assertJQ(req("q", "id:myid", "fl", "id,nonstored_dv_*"), + "/response/docs==[" + + "{'id':'myid'}" + + "]"); + assertJQ(req("q", "id:myid", "fl", "id,nonstored_dv_str"), + "/response/docs==[" + + "{'id':'myid','nonstored_dv_str':'dont see me'}" + + "]"); + } public void testManagedSchema() throws Exception { IndexSchema oldSchema = h.getCore().getLatestSchema(); diff --git a/solr/core/src/test/org/apache/solr/search/MockSearchComponent.java b/solr/core/src/test/org/apache/solr/search/MockSearchComponent.java index 1539dfd919a1..874b21a8c6b7 100644 --- a/solr/core/src/test/org/apache/solr/search/MockSearchComponent.java +++ b/solr/core/src/test/org/apache/solr/search/MockSearchComponent.java @@ -46,10 +46,4 @@ public void process(ResponseBuilder rb) throws IOException { public String getDescription() { return "Mock search component for tests"; } - - @Override - public String getSource() { - return ""; - } - } diff --git a/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java b/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java index a9b5c6ee198d..22d758614e31 100644 --- a/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java +++ b/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java @@ -1099,7 +1099,8 @@ public void testAggs() throws Exception { assertFuncEquals("agg_hll(foo_i)", "agg_hll(foo_i)"); assertFuncEquals("agg_sumsq(foo_i)", "agg_sumsq(foo_i)"); assertFuncEquals("agg_percentile(foo_i,50)", "agg_percentile(foo_i,50)"); - // assertFuncEquals("agg_stdev(foo_i)", "agg_stdev(foo_i)"); + assertFuncEquals("agg_variance(foo_i)", "agg_variance(foo_i)"); + assertFuncEquals("agg_stddev(foo_i)", "agg_stddev(foo_i)"); // assertFuncEquals("agg_multistat(foo_i)", "agg_multistat(foo_i)"); } diff --git a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java index 6c7b0cf6b8c7..6a211d098548 100644 --- a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java 
+++ b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java @@ -23,6 +23,8 @@ import java.util.Set; import java.util.stream.Stream; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -364,8 +366,16 @@ public void testFocusQueryParser() { assertQ(req("defType","edismax", "mm","0", "q","Terminator: 100", "qf","movies_t foo_i"), twor); - assertQ(req("defType","edismax", "mm","100%", "q","Terminator: 100", "qf","movies_t foo_i"), + assertQ(req("defType","edismax", "mm","100%", "q","Terminator: 100", "qf","movies_t foo_i", "sow","true"), nor); + // When sow=false, the per-field query structures differ (no "Terminator" query on integer field foo_i), + // so a dismax-per-field is constructed. As a result, mm=100% is applied per-field instead of per-term; + // since there is only one term (100) required in the foo_i field's dismax, the query can match docs that + // only have the 100 term in the foo_i field, and don't necessarily have "Terminator" in any field. + assertQ(req("defType","edismax", "mm","100%", "q","Terminator: 100", "qf","movies_t foo_i", "sow","false"), + oner); + assertQ(req("defType","edismax", "mm","100%", "q","Terminator: 100", "qf","movies_t foo_i"), // default sow=false + oner); assertQ(req("defType","edismax", "mm","100%", "q","Terminator: 8", "qf","movies_t foo_i"), oner); @@ -1413,19 +1423,21 @@ public void testSplitOnWhitespace_Basic() throws Exception { assertJQ(req("qf", "text_sw title", "defType","edismax", "q","wi fi", "sow","true") , "/response/numFound==0" ); - assertJQ(req("qf","text_sw title", "defType","edismax", "q","wi fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("qf","text_sw title", "defType","edismax", "q","wi fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='72'" ); assertJQ(req("qf","text_sw title", "q","{!edismax sow=false}wi fi") , "/response/numFound==1" , "/response/docs/[0]/id=='72'" ); - assertJQ(req("df", "text_sw title", "q","{!edismax sow=true}wi fi") + assertJQ(req("qf", "text_sw title", "q","{!edismax sow=true}wi fi") , "/response/numFound==0" ); - assertJQ(req("df", "text_sw title", "q", "{!edismax}wi fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("qf", "text_sw title", "q", "{!edismax}wi fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='72'" ); assertQ(req("qf", "name title", @@ -1451,7 +1463,7 @@ public void testSplitOnWhitespace_Basic() throws Exception { assertQ(req("qf", "name title", "q", "barking curds of stigma", "defType", "edismax", - "debugQuery", "true"), // Default sow=true + "debugQuery", "true"), // Default sow=false "//str[@name='parsedquery'][contains(.,'DisjunctionMaxQuery((name:barking | title:barking))')]", "//str[@name='parsedquery'][contains(.,'DisjunctionMaxQuery((name:curds | title:curds))')]", "//str[@name='parsedquery'][contains(.,'DisjunctionMaxQuery((name:of | title:of))')]", @@ -1768,18 +1780,18 @@ public void testAutoGeneratePhraseQueries() throws Exception { // // crow blackbird, grackle - try (SolrQueryRequest req = req(sowFalseParams)) { - QParser qParser = QParser.getParser("text:grackle", "edismax", req); // "text" has autoGeneratePhraseQueries="true" - Query q = qParser.getQuery(); - assertEquals("+(text:\"crow blackbird\" text:grackl)", q.toString()); - } - for (SolrParams params : Arrays.asList(noSowParams, 
sowTrueParams)) { + for (SolrParams params : Arrays.asList(noSowParams, sowFalseParams)) { try (SolrQueryRequest req = req(params)) { - QParser qParser = QParser.getParser("text:grackle", "edismax", req); + QParser qParser = QParser.getParser("text:grackle", "edismax", req); // "text" has autoGeneratePhraseQueries="true" Query q = qParser.getQuery(); - assertEquals("+spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])", q.toString()); + assertEquals("+(text:\"crow blackbird\" text:grackl)", q.toString()); } } + try (SolrQueryRequest req = req(sowTrueParams)) { + QParser qParser = QParser.getParser("text:grackle", "edismax", req); + Query q = qParser.getQuery(); + assertEquals("+spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])", q.toString()); + } for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams, sowFalseParams)) { try (SolrQueryRequest req = req(params)) { QParser qParser = QParser.getParser("text_sw:grackle", "edismax", req); // "text_sw" doesn't specify autoGeneratePhraseQueries => default false @@ -1790,35 +1802,58 @@ public void testAutoGeneratePhraseQueries() throws Exception { Stream.of(noSowParams, sowTrueParams, sowFalseParams).forEach(p->p.add("qf", "text text_sw")); - try (SolrQueryRequest req = req(sowFalseParams)) { - QParser qParser = QParser.getParser("grackle", "edismax", req); - Query q = qParser.getQuery(); - assertEquals("+((text:\"crow blackbird\" text:grackl)" - + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl))", - q.toString()); - - qParser = QParser.getParser("grackle wi fi", "edismax", req); - q = qParser.getQuery(); - assertEquals("+(((text:\"crow blackbird\" text:grackl) text:wifi)" - + " | (((+text_sw:crow +text_sw:blackbird) text_sw:grackl) text_sw:wifi))", - q.toString()); - } - - for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams)) { + for (SolrParams params : Arrays.asList(noSowParams, sowFalseParams)) { try (SolrQueryRequest req = req(params)) { QParser qParser = QParser.getParser("grackle", "edismax", req); Query q = qParser.getQuery(); - assertEquals("+(spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])" + assertEquals("+((text:\"crow blackbird\" text:grackl)" + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl))", q.toString()); qParser = QParser.getParser("grackle wi fi", "edismax", req); q = qParser.getQuery(); - assertEquals("+((spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])" - + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl)) (text:wi | text_sw:wi) (text:fi | text_sw:fi))", + assertEquals("+(((text:\"crow blackbird\" text:grackl) text:wifi)" + + " | (((+text_sw:crow +text_sw:blackbird) text_sw:grackl) text_sw:wifi))", q.toString()); } } + + try (SolrQueryRequest req = req(sowTrueParams)) { + QParser qParser = QParser.getParser("grackle", "edismax", req); + Query q = qParser.getQuery(); + assertEquals("+(spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])" + + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl))", + q.toString()); + + qParser = QParser.getParser("grackle wi fi", "edismax", req); + q = qParser.getQuery(); + assertEquals("+((spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])" + + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl)) (text:wi | text_sw:wi) (text:fi | text_sw:fi))", + q.toString()); + } + } + + public void testSowFalseWithBoost() throws Exception { + try (SolrQueryRequest req = req("sow", "false", "qf", "subject title")) { + QParser qParser = 
QParser.getParser("one two", "edismax", req); + Query q = qParser.getQuery(); + assertEquals("+((title:one | subject:on) (title:two | subject:two))", q.toString()); + } + try (SolrQueryRequest req = req("sow", "false", "qf", "subject title^5")) { + QParser qParser = QParser.getParser("one two", "edismax", req); + Query q = qParser.getQuery(); + assertEquals("+(((title:one)^5.0 | subject:on) ((title:two)^5.0 | subject:two))", q.toString()); + } + try (SolrQueryRequest req = req("sow", "false", "qf", "subject^3 title")) { + QParser qParser = QParser.getParser("one two", "edismax", req); + Query q = qParser.getQuery(); + assertEquals("+((title:one | (subject:on)^3.0) (title:two | (subject:two)^3.0))", q.toString()); + } + try (SolrQueryRequest req = req("sow", "false", "qf", "subject^10 title^20")) { + QParser qParser = QParser.getParser("one two", "edismax", req); + Query q = qParser.getQuery(); + assertEquals("+(((title:one)^20.0 | (subject:on)^10.0) ((title:two)^20.0 | (subject:two)^10.0))", q.toString()); + } } @@ -1915,6 +1950,8 @@ public MultilanguageDismaxConfiguration(SolrParams localParams, static class FuzzyDismaxQParser extends ExtendedDismaxQParser { + + private static final float MIN_SIMILARITY = 0.75F; public FuzzyDismaxQParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { @@ -1935,16 +1972,50 @@ public FuzzyQueryParser(QParser parser, String defaultField) { super(parser, defaultField); frequentlyMisspelledWords = new HashSet<>(); frequentlyMisspelledWords.add("absence"); + frequentlyMisspelledWords.add("absenc"); } @Override protected Query getFieldQuery(String field, String val, boolean quoted, boolean raw) throws SyntaxError { if(frequentlyMisspelledWords.contains(val)) { - return getFuzzyQuery(field, val, 0.75F); + return getFuzzyQuery(field, val, MIN_SIMILARITY); } return super.getFieldQuery(field, val, quoted, raw); } + + /** + * Handle multi-term queries by repacking boolean queries with frequently misspelled term + * queries rewritten as fuzzy queries. + **/ + @Override + protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, + boolean quoted, boolean fieldAutoGenPhraseQueries, boolean fieldEnableGraphQueries) + throws SyntaxError { + Query q = super.newFieldQuery + (analyzer, field, queryText, quoted, fieldAutoGenPhraseQueries, fieldEnableGraphQueries); + if (q instanceof BooleanQuery) { + boolean rewrittenSubQ = false; // dirty flag: rebuild the repacked query? 
+ BooleanQuery.Builder builder = newBooleanQuery(); + for (BooleanClause clause : ((BooleanQuery)q).clauses()) { + Query subQ = clause.getQuery(); + if (subQ instanceof TermQuery) { + Term subTerm = ((TermQuery)subQ).getTerm(); + if (frequentlyMisspelledWords.contains(subTerm.text())) { + rewrittenSubQ = true; + Query fuzzySubQ = newFuzzyQuery(subTerm, MIN_SIMILARITY, getFuzzyPrefixLength()); + clause = newBooleanClause(fuzzySubQ, clause.getOccur()); + } + } + builder.add(clause); + } + if (rewrittenSubQ) { + builder.setMinimumNumberShouldMatch(((BooleanQuery)q).getMinimumNumberShouldMatch()); + q = builder.build(); + } + } + return q; + } } } diff --git a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java index 0034b13e4d98..72fc9cedbb5b 100644 --- a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java +++ b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java @@ -17,12 +17,13 @@ package org.apache.solr.search; import org.apache.lucene.util.LuceneTestCase; -import org.apache.solr.common.util.NamedList; +import org.apache.lucene.util.TestUtil; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.util.ConcurrentLRUCache; import org.apache.solr.util.RTimer; import java.io.IOException; -import java.io.Serializable; import java.util.HashMap; import java.util.Map; import java.util.Random; @@ -37,9 +38,14 @@ * @since solr 1.4 */ public class TestFastLRUCache extends LuceneTestCase { - + SolrMetricManager metricManager = new SolrMetricManager(); + String registry = TestUtil.randomSimpleString(random(), 2, 10); + String scope = TestUtil.randomSimpleString(random(), 2, 10); + public void testPercentageAutowarm() throws IOException { FastLRUCache fastCache = new FastLRUCache<>(); + fastCache.initializeMetrics(metricManager, registry, scope); + MetricsMap metrics = fastCache.getMetricsMap(); Map params = new HashMap<>(); params.put("size", "100"); params.put("initialSize", "10"); @@ -52,12 +58,14 @@ public void testPercentageAutowarm() throws IOException { } assertEquals("25", fastCache.get(25)); assertEquals(null, fastCache.get(110)); - NamedList nl = fastCache.getStatistics(); + Map nl = metrics.getValue(); assertEquals(2L, nl.get("lookups")); assertEquals(1L, nl.get("hits")); assertEquals(101L, nl.get("inserts")); assertEquals(null, fastCache.get(1)); // first item put in should be the first out FastLRUCache fastCacheNew = new FastLRUCache<>(); + fastCacheNew.initializeMetrics(metricManager, registry, scope); + metrics = fastCacheNew.getMetricsMap(); fastCacheNew.init(params, o, cr); fastCacheNew.warm(null, fastCache); fastCacheNew.setState(SolrCache.State.LIVE); @@ -65,7 +73,7 @@ public void testPercentageAutowarm() throws IOException { fastCacheNew.put(103, "103"); assertEquals("90", fastCacheNew.get(90)); assertEquals("50", fastCacheNew.get(50)); - nl = fastCacheNew.getStatistics(); + nl = metrics.getValue(); assertEquals(2L, nl.get("lookups")); assertEquals(2L, nl.get("hits")); assertEquals(1L, nl.get("inserts")); @@ -86,6 +94,7 @@ public void testPercentageAutowarmMultiple() throws IOException { private void doTestPercentageAutowarm(int limit, int percentage, int[] hits, int[]misses) { FastLRUCache fastCache = new FastLRUCache<>(); + fastCache.initializeMetrics(metricManager, registry, scope); Map params = new HashMap<>(); params.put("size", String.valueOf(limit)); params.put("initialSize", "10"); @@ -98,6 +107,7 @@ private void 
doTestPercentageAutowarm(int limit, int percentage, int[] hits, int } FastLRUCache fastCacheNew = new FastLRUCache<>(); + fastCacheNew.initializeMetrics(metricManager, registry, scope); fastCacheNew.init(params, o, cr); fastCacheNew.warm(null, fastCache); fastCacheNew.setState(SolrCache.State.LIVE); @@ -110,7 +120,7 @@ private void doTestPercentageAutowarm(int limit, int percentage, int[] hits, int for(int miss:misses) { assertEquals("The value " + miss + " should NOT be on new cache", null, fastCacheNew.get(miss)); } - NamedList nl = fastCacheNew.getStatistics(); + Map nl = fastCacheNew.getMetricsMap().getValue(); assertEquals(Long.valueOf(hits.length + misses.length), nl.get("lookups")); assertEquals(Long.valueOf(hits.length), nl.get("hits")); fastCacheNew.close(); @@ -118,6 +128,7 @@ private void doTestPercentageAutowarm(int limit, int percentage, int[] hits, int public void testNoAutowarm() throws IOException { FastLRUCache fastCache = new FastLRUCache<>(); + fastCache.initializeMetrics(metricManager, registry, scope); Map params = new HashMap<>(); params.put("size", "100"); params.put("initialSize", "10"); @@ -129,7 +140,7 @@ public void testNoAutowarm() throws IOException { } assertEquals("25", fastCache.get(25)); assertEquals(null, fastCache.get(110)); - NamedList nl = fastCache.getStatistics(); + Map nl = fastCache.getMetricsMap().getValue(); assertEquals(2L, nl.get("lookups")); assertEquals(1L, nl.get("hits")); assertEquals(101L, nl.get("inserts")); @@ -177,6 +188,7 @@ public void testFullAutowarm() throws IOException { public void testSimple() throws IOException { FastLRUCache sc = new FastLRUCache(); + sc.initializeMetrics(metricManager, registry, scope); Map l = new HashMap(); l.put("size", "100"); l.put("initialSize", "10"); @@ -189,7 +201,8 @@ public void testSimple() throws IOException { } assertEquals("25", sc.get(25)); assertEquals(null, sc.get(110)); - NamedList nl = sc.getStatistics(); + MetricsMap metrics = sc.getMetricsMap(); + Map nl = metrics.getValue(); assertEquals(2L, nl.get("lookups")); assertEquals(1L, nl.get("hits")); assertEquals(101L, nl.get("inserts")); @@ -198,6 +211,7 @@ public void testSimple() throws IOException { FastLRUCache scNew = new FastLRUCache(); + scNew.initializeMetrics(metricManager, registry, scope); scNew.init(l, o, cr); scNew.warm(null, sc); scNew.setState(SolrCache.State.LIVE); @@ -205,7 +219,7 @@ public void testSimple() throws IOException { scNew.put(103, "103"); assertEquals("90", scNew.get(90)); assertEquals(null, scNew.get(50)); - nl = scNew.getStatistics(); + nl = scNew.getMetricsMap().getValue(); assertEquals(2L, nl.get("lookups")); assertEquals(1L, nl.get("hits")); assertEquals(1L, nl.get("inserts")); diff --git a/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java b/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java index 8fe3f9717ab3..c36066a98181 100644 --- a/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java +++ b/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java @@ -17,6 +17,7 @@ package org.apache.solr.search; import java.io.IOException; +import java.util.Date; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -25,6 +26,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Metric; import com.google.common.collect.ImmutableMap; import org.apache.lucene.index.IndexReader; import 
org.apache.lucene.index.IndexReaderContext; @@ -137,13 +140,15 @@ public void testReopen() throws Exception { int baseRefCount = r3.getRefCount(); assertEquals(1, baseRefCount); - Object sr3SearcherRegAt = sr3.getSearcher().getStatistics().get("registeredAt"); + Map metrics = h.getCore().getCoreMetricManager().getRegistry().getMetrics(); + Gauge g = (Gauge)metrics.get("SEARCHER.searcher.registeredAt"); + Date sr3SearcherRegAt = g.getValue(); assertU(commit()); // nothing has changed SolrQueryRequest sr4 = req("q","foo"); assertSame("nothing changed, searcher should be the same", sr3.getSearcher(), sr4.getSearcher()); assertEquals("nothing changed, searcher should not have been re-registered", - sr3SearcherRegAt, sr4.getSearcher().getStatistics().get("registeredAt")); + sr3SearcherRegAt, g.getValue()); IndexReader r4 = sr4.getSearcher().getRawReader(); // force an index change so the registered searcher won't be the one we are testing (and diff --git a/solr/core/src/test/org/apache/solr/search/TestLFUCache.java b/solr/core/src/test/org/apache/solr/search/TestLFUCache.java index d137875653a5..8207522ddca8 100644 --- a/solr/core/src/test/org/apache/solr/search/TestLFUCache.java +++ b/solr/core/src/test/org/apache/solr/search/TestLFUCache.java @@ -16,9 +16,10 @@ */ package org.apache.solr.search; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.util.ExecutorUtil; -import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.util.ConcurrentLFUCache; import org.apache.solr.util.DefaultSolrThreadFactory; import org.apache.solr.util.RefCounted; @@ -32,6 +33,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -59,7 +61,7 @@ public void testTimeDecayParams() throws IOException { SolrIndexSearcher searcher = holder.get(); LFUCache cacheDecayTrue = (LFUCache) searcher.getCache("lfuCacheDecayTrue"); assertNotNull(cacheDecayTrue); - NamedList stats = cacheDecayTrue.getStatistics(); + Map stats = cacheDecayTrue.getMetricsMap().getValue(); assertTrue((Boolean) stats.get("timeDecay")); addCache(cacheDecayTrue, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); for (int idx = 0; idx < 64; ++idx) { @@ -70,7 +72,7 @@ public void testTimeDecayParams() throws IOException { LFUCache cacheDecayDefault = (LFUCache) searcher.getCache("lfuCacheDecayDefault"); assertNotNull(cacheDecayDefault); - stats = cacheDecayDefault.getStatistics(); + stats = cacheDecayDefault.getMetricsMap().getValue(); assertTrue((Boolean) stats.get("timeDecay")); addCache(cacheDecayDefault, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertCache(cacheDecayDefault, 1, 2, 3, 4, 5); @@ -84,7 +86,7 @@ public void testTimeDecayParams() throws IOException { LFUCache cacheDecayFalse = (LFUCache) searcher.getCache("lfuCacheDecayFalse"); assertNotNull(cacheDecayFalse); - stats = cacheDecayFalse.getStatistics(); + stats = cacheDecayFalse.getMetricsMap().getValue(); assertFalse((Boolean) stats.get("timeDecay")); addCache(cacheDecayFalse, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertCache(cacheDecayFalse, 1, 2, 3, 4, 5); @@ -131,9 +133,16 @@ private void assertNotCache(LFUCache cache, int... 
gets) { @Test public void testSimple() throws IOException { + SolrMetricManager metricManager = new SolrMetricManager(); + Random r = random(); + String registry = TestUtil.randomSimpleString(r, 2, 10); + String scope = TestUtil.randomSimpleString(r, 2, 10); LFUCache lfuCache = new LFUCache(); LFUCache newLFUCache = new LFUCache(); LFUCache noWarmLFUCache = new LFUCache(); + lfuCache.initializeMetrics(metricManager, registry, scope + ".lfuCache"); + newLFUCache.initializeMetrics(metricManager, registry, scope + ".newLFUCache"); + noWarmLFUCache.initializeMetrics(metricManager, registry, scope + ".noWarmLFUCache"); try { Map params = new HashMap(); params.put("size", "100"); @@ -148,7 +157,7 @@ public void testSimple() throws IOException { assertEquals("15", lfuCache.get(15)); assertEquals("75", lfuCache.get(75)); assertEquals(null, lfuCache.get(110)); - NamedList nl = lfuCache.getStatistics(); + Map nl = lfuCache.getMetricsMap().getValue(); assertEquals(3L, nl.get("lookups")); assertEquals(2L, nl.get("hits")); assertEquals(101L, nl.get("inserts")); @@ -164,7 +173,7 @@ public void testSimple() throws IOException { assertEquals("15", newLFUCache.get(15)); assertEquals("75", newLFUCache.get(75)); assertEquals(null, newLFUCache.get(50)); - nl = newLFUCache.getStatistics(); + nl = newLFUCache.getMetricsMap().getValue(); assertEquals(3L, nl.get("lookups")); assertEquals(2L, nl.get("hits")); assertEquals(1L, nl.get("inserts")); diff --git a/solr/core/src/test/org/apache/solr/search/TestLRUCache.java b/solr/core/src/test/org/apache/solr/search/TestLRUCache.java index d2f74de5960b..fa34911b80a4 100644 --- a/solr/core/src/test/org/apache/solr/search/TestLRUCache.java +++ b/solr/core/src/test/org/apache/solr/search/TestLRUCache.java @@ -17,21 +17,25 @@ package org.apache.solr.search; import java.io.IOException; -import java.io.Serializable; import java.util.HashMap; import java.util.Map; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.util.TestUtil; import org.apache.solr.common.SolrException; -import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.SolrMetricManager; /** * Test for org.apache.solr.search.LRUCache */ public class TestLRUCache extends LuceneTestCase { + SolrMetricManager metricManager = new SolrMetricManager(); + String registry = TestUtil.randomSimpleString(random(), 2, 10); + String scope = TestUtil.randomSimpleString(random(), 2, 10); + public void testFullAutowarm() throws IOException { LRUCache lruCache = new LRUCache<>(); Map params = new HashMap<>(); @@ -97,6 +101,7 @@ private void doTestPercentageAutowarm(int limit, int percentage, int[] hits, int @SuppressWarnings("unchecked") public void testNoAutowarm() throws IOException { LRUCache lruCache = new LRUCache<>(); + lruCache.initializeMetrics(metricManager, registry, scope); Map params = new HashMap<>(); params.put("size", "100"); params.put("initialSize", "10"); @@ -108,7 +113,7 @@ public void testNoAutowarm() throws IOException { } assertEquals("25", lruCache.get(25)); assertEquals(null, lruCache.get(110)); - NamedList nl = lruCache.getStatistics(); + Map nl = lruCache.getMetricsMap().getValue(); assertEquals(2L, nl.get("lookups")); assertEquals(1L, nl.get("hits")); assertEquals(101L, nl.get("inserts")); @@ -126,6 +131,7 @@ public void testNoAutowarm() throws IOException { public void testMaxRamSize() throws Exception { LRUCache accountableLRUCache = new LRUCache<>(); + 
accountableLRUCache.initializeMetrics(metricManager, registry, scope); Map params = new HashMap<>(); params.put("size", "5"); params.put("maxRamMB", "1"); @@ -149,7 +155,7 @@ public long ramBytesUsed() { }); assertEquals(1, accountableLRUCache.size()); assertEquals(baseSize + 512 * 1024 + LRUCache.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + LRUCache.DEFAULT_RAM_BYTES_USED, accountableLRUCache.ramBytesUsed()); - NamedList nl = accountableLRUCache.getStatistics(); + Map nl = accountableLRUCache.getMetricsMap().getValue(); assertEquals(1L, nl.get("evictions")); assertEquals(1L, nl.get("evictionsRamUsage")); accountableLRUCache.put("3", new Accountable() { @@ -158,7 +164,7 @@ public long ramBytesUsed() { return 1024; } }); - nl = accountableLRUCache.getStatistics(); + nl = accountableLRUCache.getMetricsMap().getValue(); assertEquals(1L, nl.get("evictions")); assertEquals(1L, nl.get("evictionsRamUsage")); assertEquals(2L, accountableLRUCache.size()); diff --git a/solr/core/src/test/org/apache/solr/search/TestReRankQParserPlugin.java b/solr/core/src/test/org/apache/solr/search/TestReRankQParserPlugin.java index e4d6a5b5fff2..42d05e9c8914 100644 --- a/solr/core/src/test/org/apache/solr/search/TestReRankQParserPlugin.java +++ b/solr/core/src/test/org/apache/solr/search/TestReRankQParserPlugin.java @@ -16,11 +16,12 @@ */ package org.apache.solr.search; +import java.util.Map; + import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.core.SolrInfoMBean; +import org.apache.solr.metrics.MetricsMap; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -376,8 +377,8 @@ public void testReRankQueries() throws Exception { "//result/doc[5]/float[@name='id'][.='2.0']" ); - SolrInfoMBean info = h.getCore().getInfoRegistry().get("queryResultCache"); - NamedList stats = info.getStatistics(); + MetricsMap metrics = (MetricsMap)h.getCore().getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.queryResultCache"); + Map stats = metrics.getValue(); long inserts = (Long) stats.get("inserts"); @@ -401,8 +402,7 @@ public void testReRankQueries() throws Exception { ); - info = h.getCore().getInfoRegistry().get("queryResultCache"); - stats = info.getStatistics(); + stats = metrics.getValue(); long inserts1 = (Long) stats.get("inserts"); @@ -426,8 +426,7 @@ public void testReRankQueries() throws Exception { "//result/doc[5]/float[@name='id'][.='1.0']" ); - info = h.getCore().getInfoRegistry().get("queryResultCache"); - stats = info.getStatistics(); + stats = metrics.getValue(); long inserts2 = (Long) stats.get("inserts"); //Last query was NOT added to the cache assertTrue(inserts1 == inserts2); diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java index 4b0c51c23522..b6ec6b16f391 100644 --- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java +++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java @@ -17,6 +17,7 @@ package org.apache.solr.search; +import static org.apache.solr.search.TestRecovery.VersionProvider.*; import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM; import com.codahale.metrics.Gauge; @@ -292,10 +293,15 @@ public void testNewDBQAndDocMatchingOldDBQDuringLogReplay() throws Exception { @Test public void testLogReplayWithReorderedDBQ() throws Exception { 
testLogReplayWithReorderedDBQWrapper(() -> { - updateJ(jsonAdd(sdoc("id", "RDBQ1_1", "_version_", "1010")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); - updateJ(jsonDelQ("id:RDBQ1_2"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "-1017")); // This should've arrived after the 1015th update - updateJ(jsonAdd(sdoc("id", "RDBQ1_2", "_version_", "1015")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); - updateJ(jsonAdd(sdoc("id", "RDBQ1_3", "_version_", "1020")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + String v1010 = getNextVersion(); + String v1015 = getNextVersion(); + String v1017_del = "-" + getNextVersion(); + String v1020 = getNextVersion(); + + updateJ(jsonAdd(sdoc("id", "RDBQ1_1", "_version_", v1010)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonDelQ("id:RDBQ1_2"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v1017_del)); // This should've arrived after the ver2 update + updateJ(jsonAdd(sdoc("id", "RDBQ1_2", "_version_", v1015)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdoc("id", "RDBQ1_3", "_version_", v1020)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); }, () -> assertJQ(req("q", "*:*"), "/response/numFound==2") ); @@ -304,16 +310,22 @@ public void testLogReplayWithReorderedDBQ() throws Exception { @Test public void testLogReplayWithReorderedDBQByAsterixAndChildDocs() throws Exception { testLogReplayWithReorderedDBQWrapper(() -> { + String v1010 = getNextVersion(); + String v1012 = getNextVersion(); + String v1017_del = "-" + getNextVersion(); + String v1018 = getNextVersion(); + String v1020 = getNextVersion(); + // 1010 - will be deleted - updateJ(jsonAdd(sdocWithChildren("RDBQ2_1", "1010")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ2_1", v1010)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); // 1018 - should be kept, including child docs - updateJ(jsonAdd(sdocWithChildren("RDBQ2_2", "1018")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ2_2", v1018)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); // 1017 - delete should affect only 1010 - updateJ(jsonDelQ("_root_:RDBQ2_1 _root_:RDBQ2_2 id:RDBQ2_3 _root_:RDBQ2_4"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "-1017")); // This should've arrived after the 1015th update + updateJ(jsonDelQ("_root_:RDBQ2_1 _root_:RDBQ2_2 id:RDBQ2_3 _root_:RDBQ2_4"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v1017_del)); // This should've arrived after the ver2 update // 1012 - will be deleted - updateJ(jsonAdd(sdoc("id", "RDBQ2_3", "_version_", "1012")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdoc("id", "RDBQ2_3", "_version_", v1012)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); // 1020 - should be untouched - updateJ(jsonAdd(sdocWithChildren("RDBQ2_4", "1020")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ2_4", v1020)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); }, () -> assertJQ(req("q", "*:*"), "/response/numFound==6") ); @@ -322,16 +334,22 @@ public void testLogReplayWithReorderedDBQByAsterixAndChildDocs() throws Exceptio @Test public void testLogReplayWithReorderedDBQByIdAndChildDocs() throws Exception { testLogReplayWithReorderedDBQWrapper(() -> { + String v1010 = getNextVersion(); + String v1012 = getNextVersion(); + String v1017_del = "-" + getNextVersion(); + String v1018 = getNextVersion(); + String v1020 = getNextVersion(); + // 1010 - will be deleted - updateJ(jsonAdd(sdocWithChildren("RDBQ3_1", "1010")), 
params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ3_1", v1010)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); // 1018 - should be kept, including child docs - updateJ(jsonAdd(sdocWithChildren("RDBQ3_2", "1018")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ3_2", v1018)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); // 1017 - delete should affect only 1010 - updateJ(jsonDelQ("id:RDBQ3_1 id:RDBQ3_2 id:RDBQ3_3 id:RDBQ3_4"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "-1017")); // This should've arrived after the 1015th update + updateJ(jsonDelQ("id:RDBQ3_1 id:RDBQ3_2 id:RDBQ3_3 id:RDBQ3_4"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v1017_del)); // This should've arrived after the ver2 update // 1012 - will be deleted - updateJ(jsonAdd(sdoc("id", "RDBQ3_3", "_version_", "1012")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdoc("id", "RDBQ3_3", "_version_", v1012)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); // 1020 - should be untouched - updateJ(jsonAdd(sdocWithChildren("RDBQ3_4", "1020")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ3_4", v1020)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); }, () -> assertJQ(req("q", "*:*"), "/response/numFound==8") // RDBQ3_2, RDBQ3_4 and 6 children docs (delete by id does not delete child docs) ); @@ -340,10 +358,13 @@ public void testLogReplayWithReorderedDBQByIdAndChildDocs() throws Exception { @Test public void testLogReplayWithReorderedDBQInsertingChildnodes() throws Exception { testLogReplayWithReorderedDBQWrapper(() -> { - updateJ(jsonDelQ("id:RDBQ4_2"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "-1017")); + String v1013 = getNextVersion(); + String v1017_del = "-" + getNextVersion(); + + updateJ(jsonDelQ("id:RDBQ4_2"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v1017_del)); // test doc: B1 // 1013 - will be inserted with 3 children - updateJ(jsonAdd(sdocWithChildren("RDBQ4_1", "1013", 3)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ4_1", v1013, 3)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); }, () -> assertJQ(req("q", "*:*"), "/response/numFound==4") // RDBQ4_1 and RDBQ4_2, plus 2x 3 children ); @@ -353,17 +374,23 @@ public void testLogReplayWithReorderedDBQInsertingChildnodes() throws Exception @Test public void testLogReplayWithReorderedDBQUpdateWithDifferentChildCount() throws Exception { testLogReplayWithReorderedDBQWrapper(() -> { + String v1011 = getNextVersion(); + String v1012 = getNextVersion(); + String v1013 = getNextVersion(); + String v1018 = getNextVersion(); + String v1019_del = "-" + getNextVersion(); + // control - // 1013 - will be inserted with 3 children - updateJ(jsonAdd(sdocWithChildren("RDBQ5_1", "1011", 2)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + // 1011 - will be inserted with 3 children as 1012 + updateJ(jsonAdd(sdocWithChildren("RDBQ5_1", v1011, 2)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + // 1012 - this should be the final + updateJ(jsonAdd(sdocWithChildren("RDBQ5_1", v1012, 3)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + + // 1013 - will be inserted with 3 children as 1018 + updateJ(jsonAdd(sdocWithChildren("RDBQ5_2", v1013, 2)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonDelQ("id:RDBQ5_3"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", v1019_del)); // 1018 - this should be the final - updateJ(jsonAdd(sdocWithChildren("RDBQ5_1", "1012", 3)), 
params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); - - // 1013 - will be inserted with 3 children - updateJ(jsonAdd(sdocWithChildren("RDBQ5_2", "1013", 2)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); - updateJ(jsonDelQ("id:RDBQ5_3"), params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "-1019")); - // 1018 - this should be the final - updateJ(jsonAdd(sdocWithChildren("RDBQ5_2", "1018", 3)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); + updateJ(jsonAdd(sdocWithChildren("RDBQ5_2", v1018, 3)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER)); }, () -> assertJQ(req("q", "*:*"), "/response/numFound==8") // RDBQ5_1+3children+RDBQ5_2+3children ); @@ -469,23 +496,43 @@ public void testBuffering() throws Exception { int initialOps = bufferedOps.getValue(); Meter applyingBuffered = (Meter)metrics.get("TLOG.applyingBuffered.ops"); long initialApplyingOps = applyingBuffered.getCount(); + + String v3 = getNextVersion(); + String v940_del = "-" + getNextVersion(); + String v950_del = "-" + getNextVersion(); + String v1010 = getNextVersion(); + String v1015 = getNextVersion(); + String v1017_del = "-" + getNextVersion(); + String v1020 = getNextVersion(); + String v1030 = getNextVersion(); + String v1040 = getNextVersion(); + String v1050 = getNextVersion(); + String v1060 = getNextVersion(); + String v1070 = getNextVersion(); + String v1080 = getNextVersion(); + String v2010_del = "-" + getNextVersion(); + String v2060_del = "-" + getNextVersion(); + String v3000_del = "-" + getNextVersion(); + + String versionListFirstCheck = String.join(",", v2010_del, v1030, v1020, v1017_del, v1015, v1010); + String versionListSecondCheck = String.join(",", v3000_del, v1080, v1050, v1060, v940_del, v1040 ,v3, v2010_del, v1030, v1020, v1017_del, v1015, v1010); // simulate updates from a leader - updateJ(jsonAdd(sdoc("id","B1", "_version_","1010")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","B11", "_version_","1015")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonDelQ("id:B1 id:B11 id:B2 id:B3"), params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-1017")); - updateJ(jsonAdd(sdoc("id","B2", "_version_","1020")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","B3", "_version_","1030")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - deleteAndGetVersion("B1", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-2010")); + updateJ(jsonAdd(sdoc("id","B1", "_version_",v1010)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","B11", "_version_",v1015)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonDelQ("id:B1 id:B11 id:B2 id:B3"), params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_",v1017_del)); + updateJ(jsonAdd(sdoc("id","B2", "_version_",v1020)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","B3", "_version_",v1030)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + deleteAndGetVersion("B1", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_",v2010_del)); assertJQ(req("qt","/get", "getVersions","6") - ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}" + ,"=={'versions':["+versionListFirstCheck+"]}" ); assertU(commit()); assertJQ(req("qt","/get", "getVersions","6") - ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}" + ,"=={'versions':["+versionListFirstCheck+"]}" ); // updates should be buffered, so we should not see any results yet. 
@@ -515,7 +562,7 @@ public void testBuffering() throws Exception { assertEquals(6L, applyingBuffered.getCount() - initialApplyingOps); assertJQ(req("qt","/get", "getVersions","6") - ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}" + ,"=={'versions':["+versionListFirstCheck+"]}" ); @@ -528,24 +575,24 @@ public void testBuffering() throws Exception { assertEquals(UpdateLog.State.BUFFERING, ulog.getState()); Long ver = getVer(req("qt","/get", "id","B3")); - assertEquals(1030L, ver.longValue()); + assertEquals(Long.valueOf(v1030), ver); // add a reordered doc that shouldn't overwrite one in the index - updateJ(jsonAdd(sdoc("id","B3", "_version_","3")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","B3", "_version_",v3)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // reorder two buffered updates - updateJ(jsonAdd(sdoc("id","B4", "_version_","1040")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - deleteAndGetVersion("B4", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-940")); // this update should not take affect - updateJ(jsonAdd(sdoc("id","B6", "_version_","1060")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","B5", "_version_","1050")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","B8", "_version_","1080")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","B4", "_version_",v1040)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + deleteAndGetVersion("B4", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_",v940_del)); // this update should not take effect + updateJ(jsonAdd(sdoc("id","B6", "_version_",v1060)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","B5", "_version_",v1050)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","B8", "_version_",v1080)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // test that delete by query is at least buffered along with everything else so it will delete the // currently buffered id:8 (even if it doesn't currently support versioning) - updateJ("{\"delete\": { \"query\":\"id:B2 OR id:B8\" }}", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-3000")); + updateJ("{\"delete\": { \"query\":\"id:B2 OR id:B8\" }}", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_",v3000_del)); assertJQ(req("qt","/get", "getVersions","13") - ,"=={'versions':[-3000,1080,1050,1060,-940,1040,3,-2010,1030,1020,-1017,1015,1010]}" // the "3" appears because versions aren't checked while buffering + ,"=={'versions':[" + versionListSecondCheck + "]}" // the "3" appears because versions aren't checked while buffering ); logReplay.drainPermits(); @@ -557,22 +604,22 @@ public void testBuffering() throws Exception { logReplay.release(1); // now add another update - updateJ(jsonAdd(sdoc("id","B7", "_version_","1070")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","B7", "_version_",v1070)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // a reordered update that should be dropped - deleteAndGetVersion("B5", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-950")); + deleteAndGetVersion("B5", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_",v950_del)); - deleteAndGetVersion("B6", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-2060")); + deleteAndGetVersion("B6", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_",v2060_del)); logReplay.release(1000); UpdateLog.RecoveryInfo recInfo = rinfoFuture.get(); assertJQ(req("q", "*:*", "sort","id asc", "fl","id,_version_") , "/response/docs==[" - +
"{'id':'B3','_version_':1030}" - + ",{'id':'B4','_version_':1040}" - + ",{'id':'B5','_version_':1050}" - + ",{'id':'B7','_version_':1070}" + + "{'id':'B3','_version_':"+v1030+"}" + + ",{'id':'B4','_version_':"+v1040+"}" + + ",{'id':'B5','_version_':"+v1050+"}" + + ",{'id':'B7','_version_':"+v1070+"}" +"]" ); @@ -615,6 +662,22 @@ public void testDropBuffered() throws Exception { UpdateLog ulog = uhandler.getUpdateLog(); try { + String v101 = getNextVersion(); + String v102 = getNextVersion(); + String v103 = getNextVersion(); + String v104 = getNextVersion(); + String v105 = getNextVersion(); + String v200 = getNextVersion(); + String v201 = getNextVersion(); + String v203 = getNextVersion(); + String v204 = getNextVersion(); + String v205 = getNextVersion(); + String v206 = getNextVersion(); + String v301 = getNextVersion(); + String v302 = getNextVersion(); + String v998 = getNextVersion(); + String v999 = getNextVersion(); + clearIndex(); assertU(commit()); @@ -629,14 +692,14 @@ public void testDropBuffered() throws Exception { assertEquals(UpdateLog.State.BUFFERING, ulog.getState()); // simulate updates from a leader - updateJ(jsonAdd(sdoc("id","C1", "_version_","101")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","C2", "_version_","102")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","C3", "_version_","103")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C1", "_version_",v101)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C2", "_version_",v102)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C3", "_version_",v103)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); assertTrue(ulog.dropBufferedUpdates()); ulog.bufferUpdates(); - updateJ(jsonAdd(sdoc("id", "C4", "_version_","104")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id", "C5", "_version_","105")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id", "C4", "_version_",v104)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id", "C5", "_version_",v105)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); logReplay.release(1000); rinfoFuture = ulog.applyBufferedUpdates(); @@ -644,21 +707,21 @@ public void testDropBuffered() throws Exception { assertEquals(2, rinfo.adds); assertJQ(req("qt","/get", "getVersions","2") - ,"=={'versions':[105,104]}" + ,"=={'versions':["+v105+","+v104+"]}" ); // this time add some docs first before buffering starts (so tlog won't be at pos 0) - updateJ(jsonAdd(sdoc("id","C100", "_version_","200")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","C101", "_version_","201")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C100", "_version_",v200)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C101", "_version_",v201)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); ulog.bufferUpdates(); - updateJ(jsonAdd(sdoc("id","C103", "_version_","203")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","C104", "_version_","204")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C103", "_version_",v203)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C104", "_version_",v204)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); assertTrue(ulog.dropBufferedUpdates()); ulog.bufferUpdates(); - updateJ(jsonAdd(sdoc("id","C105", "_version_","205")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - 
updateJ(jsonAdd(sdoc("id","C106", "_version_","206")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C105", "_version_",v205)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C106", "_version_",v206)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); rinfoFuture = ulog.applyBufferedUpdates(); rinfo = rinfoFuture.get(); @@ -666,45 +729,45 @@ public void testDropBuffered() throws Exception { assertJQ(req("q", "*:*", "sort","_version_ asc", "fl","id,_version_") , "/response/docs==[" - + "{'id':'C4','_version_':104}" - + ",{'id':'C5','_version_':105}" - + ",{'id':'C100','_version_':200}" - + ",{'id':'C101','_version_':201}" - + ",{'id':'C105','_version_':205}" - + ",{'id':'C106','_version_':206}" + + "{'id':'C4','_version_':"+v104+"}" + + ",{'id':'C5','_version_':"+v105+"}" + + ",{'id':'C100','_version_':"+v200+"}" + + ",{'id':'C101','_version_':"+v201+"}" + + ",{'id':'C105','_version_':"+v205+"}" + + ",{'id':'C106','_version_':"+v206+"}" +"]" ); assertJQ(req("qt","/get", "getVersions","6") - ,"=={'versions':[206,205,201,200,105,104]}" + ,"=={'versions':["+String.join(",",v206,v205,v201,v200,v105,v104)+"]}" ); ulog.bufferUpdates(); assertEquals(UpdateLog.State.BUFFERING, ulog.getState()); - updateJ(jsonAdd(sdoc("id","C301", "_version_","998")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","C302", "_version_","999")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C301", "_version_",v998)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C302", "_version_",v999)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); assertTrue(ulog.dropBufferedUpdates()); // make sure we can overwrite with a lower version // TODO: is this functionality needed? - updateJ(jsonAdd(sdoc("id","C301", "_version_","301")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","C302", "_version_","302")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C301", "_version_",v301)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C302", "_version_",v302)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); assertU(commit()); assertJQ(req("qt","/get", "getVersions","2") - ,"=={'versions':[302,301]}" + ,"=={'versions':["+v302+","+v301+"]}" ); assertJQ(req("q", "*:*", "sort","_version_ desc", "fl","id,_version_", "rows","2") , "/response/docs==[" - + "{'id':'C302','_version_':302}" - + ",{'id':'C301','_version_':301}" + + "{'id':'C302','_version_':"+v302+"}" + + ",{'id':'C301','_version_':"+v301+"}" +"]" ); - updateJ(jsonAdd(sdoc("id","C2", "_version_","302")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","C2", "_version_",v302)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); @@ -744,6 +807,18 @@ public void testBufferedMultipleCalls() throws Exception { Future rinfoFuture; try { + String v101 = getNextVersion(); + String v102 = getNextVersion(); + String v103 = getNextVersion(); + String v104 = getNextVersion(); + String v105 = getNextVersion(); + String v200 = getNextVersion(); + String v201 = getNextVersion(); + String v203 = getNextVersion(); + String v204 = getNextVersion(); + String v205 = getNextVersion(); + String v206 = getNextVersion(); + clearIndex(); assertU(commit()); assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); @@ -752,16 +827,16 @@ public void testBufferedMultipleCalls() throws Exception { assertEquals(UpdateLog.State.BUFFERING, ulog.getState()); // simulate updates from a leader - updateJ(jsonAdd(sdoc("id","c1", 
"_version_","101")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","c2", "_version_","102")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","c3", "_version_","103")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c1", "_version_",v101)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c2", "_version_",v102)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c3", "_version_",v103)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // call bufferUpdates again (this currently happens when recovery fails)... we should get a new starting point ulog.bufferUpdates(); assertEquals(UpdateLog.State.BUFFERING, ulog.getState()); - updateJ(jsonAdd(sdoc("id", "c4", "_version_","104")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id", "c5", "_version_","105")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id", "c4", "_version_",v104)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id", "c5", "_version_",v105)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); logReplay.release(1000); rinfoFuture = ulog.applyBufferedUpdates(); @@ -769,21 +844,21 @@ public void testBufferedMultipleCalls() throws Exception { assertEquals(2, rinfo.adds); assertJQ(req("qt","/get", "getVersions","2") - ,"=={'versions':[105,104]}" + ,"=={'versions':["+v105+","+v104+"]}" ); // this time add some docs first before buffering starts (so tlog won't be at pos 0) - updateJ(jsonAdd(sdoc("id","c100", "_version_","200")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","c101", "_version_","201")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c100", "_version_",v200)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c101", "_version_",v201)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); ulog.bufferUpdates(); - updateJ(jsonAdd(sdoc("id","c103", "_version_","203")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","c104", "_version_","204")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c103", "_version_",v203)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c104", "_version_",v204)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // call bufferUpdates again (this currently happens when recovery fails)... 
we should get a new starting point ulog.bufferUpdates(); - updateJ(jsonAdd(sdoc("id","c105", "_version_","205")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","c106", "_version_","206")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c105", "_version_",v205)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","c106", "_version_",v206)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); rinfoFuture = ulog.applyBufferedUpdates(); rinfo = rinfoFuture.get(); @@ -791,19 +866,19 @@ public void testBufferedMultipleCalls() throws Exception { assertJQ(req("q", "*:*", "sort","_version_ asc", "fl","id,_version_") , "/response/docs==[" - + "{'id':'c4','_version_':104}" - + ",{'id':'c5','_version_':105}" - + ",{'id':'c100','_version_':200}" - + ",{'id':'c101','_version_':201}" - + ",{'id':'c105','_version_':205}" - + ",{'id':'c106','_version_':206}" - +"]" + + "{'id':'c4','_version_':"+v104+"}" + + ",{'id':'c5','_version_':"+v105+"}" + + ",{'id':'c100','_version_':"+v200+"}" + + ",{'id':'c101','_version_':"+v201+"}" + + ",{'id':'c105','_version_':"+v205+"}" + + ",{'id':'c106','_version_':"+v206+"}" ++"" +"]" ); // The updates that were buffered (but never applied) still appear in recent versions! // This is good for some uses, but may not be good for others. assertJQ(req("qt","/get", "getVersions","11") - ,"=={'versions':[206,205,204,203,201,200,105,104,103,102,101]}" + ,"=={'versions':["+String.join(",",v206,v205,v204,v203,v201,v200,v105,v104,v103,v102,v101)+"]}" ); assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); // leave each test method in a good state @@ -864,6 +939,14 @@ public void testBufferingFlags() throws Exception { UpdateLog ulog = uhandler.getUpdateLog(); try { + String v101 = getNextVersion(); + String v102 = getNextVersion(); + String v103 = getNextVersion(); + String v114 = getNextVersion(); + String v115 = getNextVersion(); + String v116 = getNextVersion(); + String v117 = getNextVersion(); + clearIndex(); assertU(commit()); @@ -871,9 +954,9 @@ public void testBufferingFlags() throws Exception { ulog.bufferUpdates(); // simulate updates from a leader - updateJ(jsonAdd(sdoc("id","Q1", "_version_","101")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","Q2", "_version_","102")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","Q3", "_version_","103")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","Q1", "_version_",v101)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","Q2", "_version_",v102)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","Q3", "_version_",v103)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); assertEquals(UpdateLog.State.BUFFERING, ulog.getState()); req.close(); @@ -903,9 +986,9 @@ public void testBufferingFlags() throws Exception { assertTrue((ulog.getStartingOperation() & UpdateLog.FLAG_GAP) != 0); // now do some normal non-buffered adds - updateJ(jsonAdd(sdoc("id","Q4", "_version_","114")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","Q5", "_version_","115")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","Q6", "_version_","116")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","Q4", "_version_",v114)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","Q5", "_version_",v115)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","Q6", "_version_",v116)), 
params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); assertU(commit()); req.close(); @@ -921,7 +1004,7 @@ public void testBufferingFlags() throws Exception { ulog.bufferUpdates(); // simulate receiving no updates ulog.applyBufferedUpdates(); - updateJ(jsonAdd(sdoc("id","Q7", "_version_","117")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // do another add to make sure flags are back to normal + updateJ(jsonAdd(sdoc("id","Q7", "_version_",v117)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // do another add to make sure flags are back to normal req.close(); h.close(); @@ -950,26 +1033,29 @@ public void testBufferingFlags() throws Exception { // make sure that on a restart, versions don't start too low @Test public void testVersionsOnRestart() throws Exception { + String v1 = getNextVersion(); + String v2 = getNextVersion(); + clearIndex(); assertU(commit()); - assertU(adoc("id","D1", "val_i","1")); - assertU(adoc("id","D2", "val_i","1")); + assertU(adoc("id","D1", "val_i",v1)); + assertU(adoc("id","D2", "val_i",v1)); assertU(commit()); - long v1 = getVer(req("q","id:D1")); - long v1a = getVer(req("q","id:D2")); + long D1Version1 = getVer(req("q","id:D1")); + long D2Version1 = getVer(req("q","id:D2")); h.close(); createCore(); - assertU(adoc("id","D1", "val_i","2")); + assertU(adoc("id","D1", "val_i",v2)); assertU(commit()); - long v2 = getVer(req("q","id:D1")); + long D1Version2 = getVer(req("q","id:D1")); - assert(v2 > v1); + assert(D1Version2 > D1Version1); assertJQ(req("qt","/get", "getVersions","2") - ,"/versions==[" + v2 + "," + v1a + "]" + ,"/versions==[" + D1Version2 + "," + D2Version1 + "]" ); } @@ -997,11 +1083,13 @@ public void testCleanShutdown() throws Exception { UpdateLog ulog = uhandler.getUpdateLog(); try { + String v1 = getNextVersion(); + clearIndex(); assertU(commit()); - assertU(adoc("id","E1", "val_i","1")); - assertU(adoc("id","E2", "val_i","1")); + assertU(adoc("id","E1", "val_i",v1)); + assertU(adoc("id","E2", "val_i",v1)); // set to a high enough number so this test won't hang on a bug logReplay.release(10); @@ -1203,13 +1291,17 @@ public void testTruncatedLog() throws Exception { // Now test that the bad log file doesn't mess up retrieving latest versions // - updateJ(jsonAdd(sdoc("id","F4", "_version_","104")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","F5", "_version_","105")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","F6", "_version_","106")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + String v104 = getNextVersion(); + String v105 = getNextVersion(); + String v106 = getNextVersion(); + + updateJ(jsonAdd(sdoc("id","F4", "_version_",v104)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","F5", "_version_",v105)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","F6", "_version_",v106)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // This currently skips the bad log file and also returns the version of the clearIndex (del *:*) // assertJQ(req("qt","/get", "getVersions","6"), "/versions==[106,105,104]"); - assertJQ(req("qt","/get", "getVersions","3"), "/versions==[106,105,104]"); + assertJQ(req("qt","/get", "getVersions","3"), "/versions==["+v106+","+v105+","+v104+"]"); } finally { DirectUpdateHandler2.commitOnClose = true; @@ -1259,14 +1351,16 @@ public void testCorruptLog() throws Exception { // // Now test that the bad log file doesn't mess up retrieving latest versions // + String v104 = getNextVersion(); + String v105 = getNextVersion(); + String v106 = 
getNextVersion(); - updateJ(jsonAdd(sdoc("id","G4", "_version_","104")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","G5", "_version_","105")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); - updateJ(jsonAdd(sdoc("id","G6", "_version_","106")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","G4", "_version_",v104)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","G5", "_version_",v105)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); + updateJ(jsonAdd(sdoc("id","G6", "_version_",v106)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // This currently skips the bad log file and also returns the version of the clearIndex (del *:*) - // assertJQ(req("qt","/get", "getVersions","6"), "/versions==[106,105,104]"); - assertJQ(req("qt","/get", "getVersions","3"), "/versions==[106,105,104]"); + assertJQ(req("qt","/get", "getVersions","3"), "/versions==["+v106+","+v105+","+v104+"]"); assertU(commit()); @@ -1554,5 +1648,13 @@ private static Long getVer(SolrQueryRequest req) throws Exception { return (Long)doc.get("_version_"); } + + static class VersionProvider{ + private static long version = 0; + + static String getNextVersion() { + return Long.toString(version++); + } + } } diff --git a/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java b/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java index 1fcfe9a75370..b909f15005ac 100644 --- a/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java +++ b/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial2.java @@ -20,6 +20,7 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.params.FacetParams; import org.apache.solr.common.params.ModifiableSolrParams; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.request.SolrQueryRequest; import org.junit.Before; import org.junit.BeforeClass; @@ -117,13 +118,13 @@ public void testRptWithGeometryField() throws Exception { // The tricky thing is verifying the cache works correctly... - SolrCache cache = (SolrCache) h.getCore().getInfoRegistry().get("perSegSpatialFieldCache_srptgeom"); - assertEquals("1", cache.getStatistics().get("cumulative_inserts").toString()); - assertEquals("0", cache.getStatistics().get("cumulative_hits").toString()); + MetricsMap cacheMetrics = (MetricsMap) h.getCore().getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.perSegSpatialFieldCache_srptgeom"); + assertEquals("1", cacheMetrics.getValue().get("cumulative_inserts").toString()); + assertEquals("0", cacheMetrics.getValue().get("cumulative_hits").toString()); // Repeat the query earlier assertJQ(sameReq, "/response/numFound==1", "/response/docs/[0]/id=='1'"); - assertEquals("1", cache.getStatistics().get("cumulative_hits").toString()); + assertEquals("1", cacheMetrics.getValue().get("cumulative_hits").toString()); assertEquals("1 segment", 1, getSearcher().getRawReader().leaves().size()); @@ -141,7 +142,7 @@ public void testRptWithGeometryField() throws Exception { // When there are new segments, we accumulate another hit. This tests the cache was not blown away on commit. // Checking equality for the first reader's cache key indicates wether the cache should still be valid. Object leafKey2 = getFirstLeafReaderKey(); - assertEquals(leafKey1.equals(leafKey2) ? "2" : "1", cache.getStatistics().get("cumulative_hits").toString()); + assertEquals(leafKey1.equals(leafKey2) ? 
"2" : "1", cacheMetrics.getValue().get("cumulative_hits").toString()); // Now try to see if heatmaps work: diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrFieldCacheMBean.java b/solr/core/src/test/org/apache/solr/search/TestSolrFieldCacheBean.java similarity index 59% rename from solr/core/src/test/org/apache/solr/search/TestSolrFieldCacheMBean.java rename to solr/core/src/test/org/apache/solr/search/TestSolrFieldCacheBean.java index d11c91920902..3ae9c472073b 100644 --- a/solr/core/src/test/org/apache/solr/search/TestSolrFieldCacheMBean.java +++ b/solr/core/src/test/org/apache/solr/search/TestSolrFieldCacheBean.java @@ -16,17 +16,21 @@ */ package org.apache.solr.search; +import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.junit.BeforeClass; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.lang.invoke.MethodHandles; +import java.util.Map; +import java.util.Random; -public class TestSolrFieldCacheMBean extends SolrTestCaseJ4 { +public class TestSolrFieldCacheBean extends SolrTestCaseJ4 { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -66,18 +70,28 @@ public void testEntryList() throws Exception { } private void assertEntryListIncluded(boolean checkJmx) { - SolrFieldCacheMBean mbean = new SolrFieldCacheMBean(); - NamedList stats = checkJmx ? mbean.getStatisticsForJmx() : mbean.getStatistics(); - assert(Integer.parseInt(stats.get("entries_count").toString()) > 0); - assertNotNull(stats.get("total_size")); - assertNotNull(stats.get("entry#0")); + SolrFieldCacheBean mbean = new SolrFieldCacheBean(); + Random r = random(); + String registryName = TestUtil.randomSimpleString(r, 1, 10); + SolrMetricManager metricManager = h.getCoreContainer().getMetricManager(); + mbean.initializeMetrics(metricManager, registryName, null); + MetricsMap metricsMap = (MetricsMap)metricManager.registry(registryName).getMetrics().get("CACHE.fieldCache"); + Map metrics = checkJmx ? metricsMap.getValue(true) : metricsMap.getValue(); + assertTrue(((Number)metrics.get("entries_count")).longValue() > 0); + assertNotNull(metrics.get("total_size")); + assertNotNull(metrics.get("entry#0")); } private void assertEntryListNotIncluded(boolean checkJmx) { - SolrFieldCacheMBean mbean = new SolrFieldCacheMBean(); - NamedList stats = checkJmx ? mbean.getStatisticsForJmx() : mbean.getStatistics(); - assert(Integer.parseInt(stats.get("entries_count").toString()) > 0); - assertNull(stats.get("total_size")); - assertNull(stats.get("entry#0")); + SolrFieldCacheBean mbean = new SolrFieldCacheBean(); + Random r = random(); + String registryName = TestUtil.randomSimpleString(r, 1, 10); + SolrMetricManager metricManager = h.getCoreContainer().getMetricManager(); + mbean.initializeMetrics(metricManager, registryName, null); + MetricsMap metricsMap = (MetricsMap)metricManager.registry(registryName).getMetrics().get("CACHE.fieldCache"); + Map metrics = checkJmx ? 
metricsMap.getValue(true) : metricsMap.getValue(); + assertTrue(((Number)metrics.get("entries_count")).longValue() > 0); + assertNull(metrics.get("total_size")); + assertNull(metrics.get("entry#0")); } } diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java index e1372d81d7d6..1a2e57208226 100644 --- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java +++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java @@ -34,9 +34,9 @@ import org.apache.lucene.search.TermQuery; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.params.MapSolrParams; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.SolrParams; -import org.apache.solr.core.SolrInfoMBean; import org.apache.solr.parser.QueryParser; import org.apache.solr.query.FilterQuery; import org.apache.solr.request.SolrQueryRequest; @@ -75,10 +75,23 @@ public static void createIndex() { @Test public void testPhrase() { + // "text" field's type has WordDelimiterGraphFilter (WDGFF) and autoGeneratePhraseQueries=true // should generate a phrase of "now cow" and match only one doc - assertQ(req("q", "text:now-cow", "indent", "true") + assertQ(req("q", "text:now-cow", "indent", "true", "sow","true") , "//*[@numFound='1']" ); + // When sow=false, autoGeneratePhraseQueries=true only works when a graph is produced + // (i.e. overlapping terms, e.g. if WDGFF's preserveOriginal=1 or concatenateWords=1). + // The WDGFF config on the "text" field doesn't produce a graph, so the generated query + // is not a phrase query. As a result, docs can match that don't match phrase query "now cow" + assertQ(req("q", "text:now-cow", "indent", "true", "sow","false") + , "//*[@numFound='2']" + ); + assertQ(req("q", "text:now-cow", "indent", "true") // default sow=false + , "//*[@numFound='2']" + ); + + // "text_np" field's type has WDGFF and (default) autoGeneratePhraseQueries=false // should generate a query of (now OR cow) and match both docs assertQ(req("q", "text_np:now-cow", "indent", "true") , "//*[@numFound='2']" @@ -389,33 +402,33 @@ public void testFilter() throws Exception { assertU(commit()); // arg... commit no longer "commits" unless there has been a change. 
- final SolrInfoMBean filterCacheStats - = h.getCore().getInfoRegistry().get("filterCache"); + final MetricsMap filterCacheStats = (MetricsMap)h.getCore().getCoreMetricManager().getRegistry() + .getMetrics().get("CACHE.searcher.filterCache"); assertNotNull(filterCacheStats); - final SolrInfoMBean queryCacheStats - = h.getCore().getInfoRegistry().get("queryResultCache"); + final MetricsMap queryCacheStats = (MetricsMap)h.getCore().getCoreMetricManager().getRegistry() + .getMetrics().get("CACHE.searcher.queryResultCache"); assertNotNull(queryCacheStats); - long inserts = (Long) filterCacheStats.getStatistics().get("inserts"); - long hits = (Long) filterCacheStats.getStatistics().get("hits"); + long inserts = (Long) filterCacheStats.getValue().get("inserts"); + long hits = (Long) filterCacheStats.getValue().get("hits"); assertJQ(req("q", "doesnotexist filter(id:1) filter(qqq_s:X) filter(abcdefg)") , "/response/numFound==2" ); inserts += 3; - assertEquals(inserts, ((Long) filterCacheStats.getStatistics().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getStatistics().get("hits")).longValue()); + assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); assertJQ(req("q", "doesnotexist2 filter(id:1) filter(qqq_s:X) filter(abcdefg)") , "/response/numFound==2" ); hits += 3; - assertEquals(inserts, ((Long) filterCacheStats.getStatistics().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getStatistics().get("hits")).longValue()); + assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // make sure normal "fq" parameters also hit the cache the same way assertJQ(req("q", "doesnotexist3", "fq", "id:1", "fq", "qqq_s:X", "fq", "abcdefg") @@ -423,8 +436,8 @@ public void testFilter() throws Exception { ); hits += 3; - assertEquals(inserts, ((Long) filterCacheStats.getStatistics().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getStatistics().get("hits")).longValue()); + assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // try a query deeply nested in a FQ assertJQ(req("q", "*:* doesnotexist4", "fq", "(id:* +(filter(id:1) filter(qqq_s:X) filter(abcdefg)) )") @@ -433,8 +446,8 @@ public void testFilter() throws Exception { inserts += 1; // +1 for top level fq hits += 3; - assertEquals(inserts, ((Long) filterCacheStats.getStatistics().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getStatistics().get("hits")).longValue()); + assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // retry the complex FQ and make sure hashCode/equals works as expected w/ filter queries assertJQ(req("q", "*:* doesnotexist5", "fq", "(id:* +(filter(id:1) filter(qqq_s:X) filter(abcdefg)) )") @@ -442,8 +455,8 @@ public void testFilter() throws Exception { ); hits += 1; // top-level fq should have been found. 
- assertEquals(inserts, ((Long) filterCacheStats.getStatistics().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getStatistics().get("hits")).longValue()); + assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // try nested filter with multiple top-level args (i.e. a boolean query) @@ -453,8 +466,8 @@ public void testFilter() throws Exception { hits += 1; // the inner filter inserts += 1; // the outer filter - assertEquals(inserts, ((Long) filterCacheStats.getStatistics().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getStatistics().get("hits")).longValue()); + assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // test the score for a filter, and that default score is 0 assertJQ(req("q", "+filter(*:*) +filter(id:1)", "fl", "id,score", "sort", "id asc") @@ -593,8 +606,9 @@ public void testSplitOnWhitespace_Basic() throws Exception { assertJQ(req("df", "syn", "q", "wi fi", "sow", "true") , "/response/numFound==0" ); - assertJQ(req("df", "syn", "q", "wi fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "wi fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); assertJQ(req("df", "syn", "q", "{!lucene sow=false}wi fi") @@ -605,8 +619,9 @@ public void testSplitOnWhitespace_Basic() throws Exception { , "/response/numFound==0" ); - assertJQ(req("df", "syn", "q", "{!lucene}wi fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "{!lucene}wi fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); } @@ -654,20 +669,25 @@ public void testSplitOnWhitespace_Comments() throws Exception { , "/response/numFound==0" ); - assertJQ(req("df", "syn", "q", "wi fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "wi fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "wi /* foo */ fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "wi /* foo */ fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "wi /* foo */ /* bar */ fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "wi /* foo */ /* bar */ fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "/* foo */ wi fi /* bar */") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", " /* foo */ wi fi /* bar */") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "/* foo */ wi /* bar */ fi /* baz */") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", " /* foo */ wi /* bar */ fi /* baz */") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); @@ -708,20 +728,25 @@ public void testSplitOnWhitespace_Comments() throws Exception { , "/response/numFound==0" ); - assertJQ(req("df", "syn", "q", "{!lucene}wi fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "{!lucene}wi fi") // default sow=false + , "/response/numFound==1" + , 
"/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "{!lucene}wi /* foo */ fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "{!lucene}wi /* foo */ fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "{!lucene}wi /* foo */ /* bar */ fi") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "{!lucene}wi /* foo */ /* bar */ fi") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "{!lucene}/* foo */ wi fi /* bar */") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "{!lucene}/* foo */ wi fi /* bar */") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); - assertJQ(req("df", "syn", "q", "{!lucene}/* foo */ wi /* bar */ fi /* baz */") // default sow=true - , "/response/numFound==0" + assertJQ(req("df", "syn", "q", "{!lucene}/* foo */ wi /* bar */ fi /* baz */") // default sow=false + , "/response/numFound==1" + , "/response/docs/[0]/id=='20'" ); } @@ -977,18 +1002,18 @@ public void testAutoGeneratePhraseQueries() throws Exception { // try (SolrQueryRequest req = req()) { - QParser qParser = QParser.getParser("text:grackle", req); // "text" has autoGeneratePhraseQueries="true" - qParser.setParams(sowFalseParams); - Query q = qParser.getQuery(); - assertEquals("text:\"crow blackbird\" text:grackl", q.toString()); - - for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams)) { - qParser = QParser.getParser("text:grackle", req); - qParser.setParams(params); - q = qParser.getQuery(); - assertEquals("spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])", q.toString()); + for (SolrParams params : Arrays.asList(noSowParams, sowFalseParams)) { + QParser qParser = QParser.getParser("text:grackle", req); // "text" has autoGeneratePhraseQueries="true" + qParser.setParams(sowFalseParams); + Query q = qParser.getQuery(); + assertEquals("text:\"crow blackbird\" text:grackl", q.toString()); } + QParser qParser = QParser.getParser("text:grackle", req); + qParser.setParams(sowTrueParams); + Query q = qParser.getQuery(); + assertEquals("spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])", q.toString()); + for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams, sowFalseParams)) { qParser = QParser.getParser("text_sw:grackle", req); // "text_sw" doesn't specify autoGeneratePhraseQueries => default false qParser.setParams(params); diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java index 1561b3eb72f3..635357603ee8 100644 --- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java +++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java @@ -244,22 +244,23 @@ public void testBasicRefinement() throws Exception { client.deleteByQuery("*:*", null); - ModifiableSolrParams p = params("cat_s", "cat_s", "xy_s", "xy_s", "num_d", "num_d", "qw_s", "qw_s"); + ModifiableSolrParams p = params("cat_s", "cat_s", "xy_s", "xy_s", "num_d", "num_d", "qw_s", "qw_s", "er_s","er_s"); String cat_s = p.get("cat_s"); String xy_s = p.get("xy_s"); String qw_s = p.get("qw_s"); + String er_s = p.get("er_s"); // this field is designed to test numBuckets refinement... 
the first phase will only have a single bucket returned for the top count bucket of cat_s String num_d = p.get("num_d"); - clients.get(0).add( sdoc("id", "01", "all_s","all", cat_s, "A", xy_s, "X" ,num_d, -1, qw_s, "Q") ); // A wins count tie - clients.get(0).add( sdoc("id", "02", "all_s","all", cat_s, "B", xy_s, "Y", num_d, 3 ) ); + clients.get(0).add( sdoc("id", "01", "all_s","all", cat_s, "A", xy_s, "X" ,num_d, -1, qw_s, "Q", er_s,"E") ); // A wins count tie + clients.get(0).add( sdoc("id", "02", "all_s","all", cat_s, "B", xy_s, "Y", num_d, 3 ) ); - clients.get(1).add( sdoc("id", "11", "all_s","all", cat_s, "B", xy_s, "X", num_d, -5 ) ); // B highest count - clients.get(1).add( sdoc("id", "12", "all_s","all", cat_s, "B", xy_s, "Y", num_d, -11, qw_s, "W") ); - clients.get(1).add( sdoc("id", "13", "all_s","all", cat_s, "A", xy_s, "X", num_d, 7 ) ); + clients.get(1).add( sdoc("id", "11", "all_s","all", cat_s, "B", xy_s, "X", num_d, -5 , er_s,"E") ); // B highest count + clients.get(1).add( sdoc("id", "12", "all_s","all", cat_s, "B", xy_s, "Y", num_d, -11, qw_s, "W" ) ); + clients.get(1).add( sdoc("id", "13", "all_s","all", cat_s, "A", xy_s, "X", num_d, 7 , er_s,"R") ); // "R" will only be picked up via refinement when parent facet is cat_s - clients.get(2).add( sdoc("id", "21", "all_s","all", cat_s, "A", xy_s, "X", num_d, 17, qw_s, "W") ); // A highest count - clients.get(2).add( sdoc("id", "22", "all_s","all", cat_s, "A", xy_s, "Y", num_d, -19 ) ); - clients.get(2).add( sdoc("id", "23", "all_s","all", cat_s, "B", xy_s, "X", num_d, 11 ) ); + clients.get(2).add( sdoc("id", "21", "all_s","all", cat_s, "A", xy_s, "X", num_d, 17, qw_s, "W", er_s,"E") ); // A highest count + clients.get(2).add( sdoc("id", "22", "all_s","all", cat_s, "A", xy_s, "Y", num_d, -19 ) ); + clients.get(2).add( sdoc("id", "23", "all_s","all", cat_s, "B", xy_s, "X", num_d, 11 ) ); client.commit(); @@ -388,7 +389,6 @@ public void testBasicRefinement() throws Exception { ); // test filling in missing "allBuckets" - // test filling in "missing" bucket for partially refined facets client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " cat :{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false, allBuckets:true, facet:{ xy:{type:terms, field:${xy_s}, limit:1, overrequest:0, allBuckets:true, refine:false} } }" + @@ -402,6 +402,21 @@ public void testBasicRefinement() throws Exception { ",cat3:{ allBuckets:{count:8}, buckets:[ {val:A, count:4, xy:{buckets:[{count:3, val:X, f:23.0}], allBuckets:{count:4, f:4.0}}}] }" + "}" ); + + // test filling in missing numBuckets + client.testJQ(params(p, "q", "*:*", + "json.facet", "{" + + " cat :{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false, numBuckets:true, facet:{ er:{type:terms, field:${er_s}, limit:1, overrequest:0, numBuckets:true, refine:false} } }" + + ", cat2:{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true , numBuckets:true, facet:{ er:{type:terms, field:${er_s}, limit:1, overrequest:0, numBuckets:true, refine:true } } }" + + "}" + ) + , "facets=={ count:8" + + ", cat:{ numBuckets:2, buckets:[ {val:A, count:3, er:{numBuckets:1,buckets:[{count:2, val:E}] }}] }" + // the "R" bucket will not be seen w/o refinement + ",cat2:{ numBuckets:2, buckets:[ {val:A, count:4, er:{numBuckets:2,buckets:[{count:2, val:E}] }}] }" + + "}" + ); + + } diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java index 95c403a78eb2..a2b6f0695de3 100644 --- 
a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java +++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Random; @@ -529,6 +530,7 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw " , f2:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'max(${num_d})'} } " + " , f3:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'unique(${where_s})'} } " + " , f4:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'hll(${where_s})'} } " + + " , f5:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'variance(${num_d})'} } " + "}" ) , "facets=={ 'count':6, " + @@ -536,6 +538,7 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw ", f2:{ 'buckets':[{ val:'B', count:3, x:11.0 }, { val:'A', count:2, x:4.0 }]} " + ", f3:{ 'buckets':[{ val:'A', count:2, x:2 }, { val:'B', count:3, x:2 }]} " + ", f4:{ 'buckets':[{ val:'A', count:2, x:2 }, { val:'B', count:3, x:2 }]} " + + ", f5:{ 'buckets':[{ val:'B', count:3, x:74.6666666666666 }, { val:'A', count:2, x:1.0 }]} " + "}" ); @@ -768,12 +771,12 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw "'f1':{ numBuckets:1, buckets:[{val:B, count:3}]} } " ); - // mincount should lower numBuckets + // mincount should not lower numBuckets (since SOLR-10552) client.testJQ(params(p, "q", "*:*", "rows", "0", "facet", "true" , "json.facet", "{f1:{terms:{${terms} field:${cat_s}, numBuckets:true, mincount:3}}}" ) , "facets=={ 'count':6, " + - "'f1':{ numBuckets:1, buckets:[{val:B, count:3}]} } " + "'f1':{ numBuckets:2, buckets:[{val:B, count:3}]} } " ); // basic range facet @@ -845,19 +848,18 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw ); - // stats at top level client.testJQ(params(p, "q", "*:*" , "json.facet", "{ sum1:'sum(${num_d})', sumsq1:'sumsq(${num_d})', avg1:'avg(${num_d})', avg2:'avg(def(${num_d},0))', min1:'min(${num_d})', max1:'max(${num_d})'" + ", numwhere:'unique(${where_s})', unique_num_i:'unique(${num_i})', unique_num_d:'unique(${num_d})', unique_date:'unique(${date})'" + ", where_hll:'hll(${where_s})', hll_num_i:'hll(${num_i})', hll_num_d:'hll(${num_d})', hll_date:'hll(${date})'" + - ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)' }" + ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)', variance:'variance(${num_d})', stddev:'stddev(${num_d})' }" ) , "facets=={ 'count':6, " + "sum1:3.0, sumsq1:247.0, avg1:0.6, avg2:0.5, min1:-9.0, max1:11.0" + ", numwhere:2, unique_num_i:4, unique_num_d:5, unique_date:5" + ", where_hll:2, hll_num_i:4, hll_num_d:5, hll_date:5" + - ", med:2.0, perc:[-9.0,2.0,11.0] }" + ", med:2.0, perc:[-9.0,2.0,11.0], variance:49.04, stddev:7.002856560004639}" ); // stats at top level, no matches @@ -865,21 +867,20 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw , "json.facet", "{ sum1:'sum(${num_d})', sumsq1:'sumsq(${num_d})', avg1:'avg(${num_d})', min1:'min(${num_d})', max1:'max(${num_d})'" + ", numwhere:'unique(${where_s})', unique_num_i:'unique(${num_i})', unique_num_d:'unique(${num_d})', unique_date:'unique(${date})'" + ", where_hll:'hll(${where_s})', hll_num_i:'hll(${num_i})', hll_num_d:'hll(${num_d})', hll_date:'hll(${date})'" + - ", med:'percentile(${num_d},50)', 
perc:'percentile(${num_d},0,50.0,100)' }" + ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)', variance:'variance(${num_d})', stddev:'stddev(${num_d})' }" ) , "facets=={count:0 " + - "/* ,sum1:0.0, sumsq1:0.0, avg1:0.0, min1:'NaN', max1:'NaN', numwhere:0 */" + + "\n// ,sum1:0.0, sumsq1:0.0, avg1:0.0, min1:'NaN', max1:'NaN', numwhere:0 \n" + " }" ); - // stats at top level, matching documents, but no values in the field // NOTE: this represents the current state of what is returned, not the ultimate desired state. client.testJQ(params(p, "q", "id:3" , "json.facet", "{ sum1:'sum(${num_d})', sumsq1:'sumsq(${num_d})', avg1:'avg(${num_d})', min1:'min(${num_d})', max1:'max(${num_d})'" + ", numwhere:'unique(${where_s})', unique_num_i:'unique(${num_i})', unique_num_d:'unique(${num_d})', unique_date:'unique(${date})'" + ", where_hll:'hll(${where_s})', hll_num_i:'hll(${num_i})', hll_num_d:'hll(${num_d})', hll_date:'hll(${date})'" + - ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)' }" + ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)', variance:'variance(${num_d})', stddev:'stddev(${num_d})' }" ) , "facets=={count:1 " + ",sum1:0.0," + @@ -894,11 +895,12 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw " where_hll:0," + " hll_num_i:0," + " hll_num_d:0," + - " hll_date:0" + + " hll_date:0," + + " variance:0.0," + + " stddev:0.0" + " }" ); - // // tests on a multi-valued field with actual multiple values, just to ensure that we are // using a multi-valued method for the rest of the tests when appropriate. @@ -1135,7 +1137,7 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw ",f3:{${terms} type:field, field:${num_i}, sort:'index asc' }" + ",f4:{${terms} type:field, field:${num_i}, sort:'index desc' }" + ",f5:{${terms} type:field, field:${num_i}, sort:'index desc', limit:1, missing:true, allBuckets:true, numBuckets:true }" + - ",f6:{${terms} type:field, field:${num_i}, sort:'index desc', mincount:2, numBuckets:true }" + // mincount should lower numbuckets + ",f6:{${terms} type:field, field:${num_i}, sort:'index desc', mincount:2, numBuckets:true }" + // mincount should not lower numbuckets (since SOLR-10552) ",f7:{${terms} type:field, field:${num_i}, sort:'index desc', offset:2, numBuckets:true }" + // test offset ",f8:{${terms} type:field, field:${num_i}, sort:'index desc', offset:100, numBuckets:true }" + // test high offset ",f9:{${terms} type:field, field:${num_i}, sort:'x desc', facet:{x:'avg(${num_d})'}, missing:true, allBuckets:true, numBuckets:true }" + // test stats @@ -1149,7 +1151,7 @@ public static void doStatsTemplated(Client client, ModifiableSolrParams p) throw ",f3:{ buckets:[{val:-5,count:2},{val:2,count:1},{val:3,count:1},{val:7,count:1} ] } " + ",f4:{ buckets:[{val:7,count:1},{val:3,count:1},{val:2,count:1},{val:-5,count:2} ] } " + ",f5:{ buckets:[{val:7,count:1}] , numBuckets:4, allBuckets:{count:5}, missing:{count:1} } " + - ",f6:{ buckets:[{val:-5,count:2}] , numBuckets:1 } " + + ",f6:{ buckets:[{val:-5,count:2}] , numBuckets:4 } " + ",f7:{ buckets:[{val:2,count:1},{val:-5,count:2}] , numBuckets:4 } " + ",f8:{ buckets:[] , numBuckets:4 } " + ",f9:{ buckets:[{val:7,count:1,x:11.0},{val:2,count:1,x:4.0},{val:3,count:1,x:2.0},{val:-5,count:2,x:-7.0} ], numBuckets:4, allBuckets:{count:5,x:0.6},missing:{count:1,x:0.0} } " + // TODO: should missing exclude "x" because no values were collected? 
@@ -1313,6 +1315,10 @@ public void testBigger() throws Exception { doBigger( client, p ); } + private String getId(int id) { + return String.format(Locale.US, "%05d", id); + } + public void doBigger(Client client, ModifiableSolrParams p) throws Exception { MacroExpander m = new MacroExpander(p.getMap()); @@ -1331,7 +1337,7 @@ public void doBigger(Client client, ModifiableSolrParams p) throws Exception { for (int i=0; i> sub = model.get(cat); if (sub == null) { sub = new HashMap<>(); @@ -1370,6 +1376,23 @@ public void doBigger(Client client, ModifiableSolrParams p) throws Exception { ); } + client.testJQ(params(p, "q", "*:*" + , "json.facet", "{f1:{type:terms, field:id, limit:1, offset:990}}" + ) + , "facets=={ 'count':" + ndocs + "," + + "'f1':{buckets:[{val:'00990',count:1}]}} " + ); + + + for (int i=0; i<20; i++) { + int off = random().nextInt(ndocs); + client.testJQ(params(p, "q", "*:*", "off",Integer.toString(off) + , "json.facet", "{f1:{type:terms, field:id, limit:1, offset:${off}}}" + ) + , "facets=={ 'count':" + ndocs + "," + + "'f1':{buckets:[{val:'" + getId(off) + "',count:1}]}} " + ); + } } public void testTolerant() throws Exception { diff --git a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java index 39fa7915a252..8c2cec36e851 100644 --- a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java +++ b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java @@ -19,8 +19,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; -import org.apache.solr.common.util.NamedList; -import org.apache.solr.search.SolrCache; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.util.BaseTestHarness; import org.junit.BeforeClass; import org.junit.Test; @@ -32,6 +31,7 @@ import java.util.List; import java.util.ListIterator; import java.util.Locale; +import java.util.Map; import javax.xml.xpath.XPathConstants; @@ -276,15 +276,15 @@ public void testChildrenParser() { @Test public void testCacheHit() throws IOException { - SolrCache parentFilterCache = (SolrCache) h.getCore().getInfoRegistry() - .get("perSegFilter"); + MetricsMap parentFilterCache = (MetricsMap)h.getCore().getCoreMetricManager().getRegistry() + .getMetrics().get("CACHE.searcher.perSegFilter"); + MetricsMap filterCache = (MetricsMap)h.getCore().getCoreMetricManager().getRegistry() + .getMetrics().get("CACHE.searcher.filterCache"); - SolrCache filterCache = (SolrCache) h.getCore().getInfoRegistry() - .get("filterCache"); - NamedList parentsBefore = parentFilterCache.getStatistics(); + Map parentsBefore = parentFilterCache.getValue(); - NamedList filtersBefore = filterCache.getStatistics(); + Map filtersBefore = filterCache.getValue(); // it should be weird enough to be uniq String parentFilter = "parent_s:([a TO c] [d TO f])"; @@ -298,7 +298,7 @@ public void testCacheHit() throws IOException { "//*[@numFound='6']"); assertEquals("didn't hit fqCache yet ", 0L, - delta("hits", filterCache.getStatistics(), filtersBefore)); + delta("hits", filterCache.getValue(), filtersBefore)); assertQ( "filter by join", @@ -306,18 +306,18 @@ public void testCacheHit() throws IOException { + "\"}child_s:l"), "//*[@numFound='6']"); assertEquals("in cache mode every request lookups", 3, - delta("lookups", parentFilterCache.getStatistics(), parentsBefore)); + delta("lookups", parentFilterCache.getValue(), parentsBefore)); assertEquals("last two lookups causes 
hits", 2, - delta("hits", parentFilterCache.getStatistics(), parentsBefore)); + delta("hits", parentFilterCache.getValue(), parentsBefore)); assertEquals("the first lookup gets insert", 1, - delta("inserts", parentFilterCache.getStatistics(), parentsBefore)); + delta("inserts", parentFilterCache.getValue(), parentsBefore)); assertEquals("true join query is cached in fqCache", 1L, - delta("lookups", filterCache.getStatistics(), filtersBefore)); + delta("lookups", filterCache.getValue(), filtersBefore)); } - private long delta(String key, NamedList a, NamedList b) { + private long delta(String key, Map a, Map b) { return (Long) a.get(key) - (Long) b.get(key); } diff --git a/solr/core/src/test/org/apache/solr/search/join/BlockJoinFacetDistribTest.java b/solr/core/src/test/org/apache/solr/search/join/BlockJoinFacetDistribTest.java index 2bd30fa68074..1b126575c0f3 100644 --- a/solr/core/src/test/org/apache/solr/search/join/BlockJoinFacetDistribTest.java +++ b/solr/core/src/test/org/apache/solr/search/join/BlockJoinFacetDistribTest.java @@ -130,12 +130,18 @@ public Set get(Object key) { // to parent query final String childQueryClause = "COLOR_s:("+(matchingColors.toString().replaceAll("[,\\[\\]]", " "))+")"; + final boolean oldFacetsEnabled = random().nextBoolean(); QueryResponse results = query("q", "{!parent which=\"type_s:parent\"}"+childQueryClause, - "facet", random().nextBoolean() ? "true":"false", + "facet", oldFacetsEnabled ? "true":"false", // try to enforce multiple phases + oldFacetsEnabled ? "facet.field" : "ignore" , "BRAND_s", + oldFacetsEnabled&&usually() ? "facet.limit" : "ignore" , "1", + oldFacetsEnabled&&usually() ? "facet.mincount" : "ignore" , "2", + oldFacetsEnabled&&usually() ? "facet.overrequest.count" : "ignore" , "0", "qt", random().nextBoolean() ? "blockJoinDocSetFacetRH" : "blockJoinFacetRH", "child.facet.field", "COLOR_s", "child.facet.field", "SIZE_s", - "rows","0" // we care only abt results + "distrib.singlePass", random().nextBoolean() ? "true":"false", + "rows", random().nextBoolean() ? 
"0":"10" ); NamedList resultsResponse = results.getResponse(); assertNotNull(resultsResponse); diff --git a/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPNoScore.java b/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPNoScore.java index 3b23be8c9a51..0d9801e65415 100644 --- a/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPNoScore.java +++ b/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPNoScore.java @@ -170,10 +170,9 @@ public void testJoinQueryType() throws SyntaxError, IOException{ { final Query query = QParser.getParser(req.getParams().get("q"), req).getQuery(); final Query rewrittenQuery = query.rewrite(req.getSearcher().getIndexReader()); - assertTrue( - rewrittenQuery+" should be Lucene's", - rewrittenQuery.getClass().getPackage().getName() - .startsWith("org.apache.lucene")); + assertEquals(rewrittenQuery+" is expected to be from Solr", + ScoreJoinQParserPlugin.class.getPackage().getName(), + rewrittenQuery.getClass().getPackage().getName()); } { final Query query = QParser.getParser( diff --git a/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPScore.java b/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPScore.java index 17abf7834f1d..b9a2e78ffe28 100644 --- a/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPScore.java +++ b/solr/core/src/test/org/apache/solr/search/join/TestScoreJoinQPScore.java @@ -21,14 +21,16 @@ import java.util.Comparator; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Random; +import com.codahale.metrics.Metric; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; -import org.apache.solr.common.util.NamedList; +import org.apache.solr.metrics.MetricsMap; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestInfo; import org.apache.solr.response.SolrQueryResponse; @@ -198,22 +200,23 @@ public void testBoost() throws Exception { public void testCacheHit() throws Exception { indexDataForScorring(); - SolrCache cache = (SolrCache) h.getCore().getInfoRegistry() - .get("queryResultCache"); + Map metrics = h.getCoreContainer().getMetricManager().registry(h.getCore().getCoreMetricManager().getRegistryName()).getMetrics(); + + MetricsMap mm = (MetricsMap)metrics.get("CACHE.searcher.queryResultCache"); { - final NamedList statPre = cache.getStatistics(); + Map statPre = mm.getValue(); h.query(req("q", "{!join from=movieId_s to=id score=Avg}title:first", "fl", "id", "omitHeader", "true")); - assertHitOrInsert(cache, statPre); + assertHitOrInsert(mm.getValue(), statPre); } { - final NamedList statPre = cache.getStatistics(); + Map statPre = mm.getValue(); h.query(req("q", "{!join from=movieId_s to=id score=Avg}title:first", "fl", "id", "omitHeader", "true")); - assertHit(cache, statPre); + assertHit(mm.getValue(), statPre); } { - NamedList statPre = cache.getStatistics(); + Map statPre = mm.getValue(); Random r = random(); boolean changed = false; @@ -234,14 +237,14 @@ public void testCacheHit() throws Exception { //" b=" + boost + "}" + q, "fl", "id", "omitHeader", "true") ); - assertInsert(cache, statPre); + assertInsert(mm.getValue(), statPre); - statPre = cache.getStatistics(); + statPre = mm.getValue(); final String repeat = h.query(req("q", "{!join from=" + from + " to=" + to + " score=" + score.toLowerCase(Locale.ROOT) + //" b=" + 
boost "}" + q, "fl", "id", "omitHeader", "true") ); - assertHit(cache, statPre); + assertHit(mm.getValue(), statPre); assertEquals("lowercase shouldn't change anything", resp, repeat); @@ -254,6 +257,7 @@ public void testCacheHit() throws Exception { // this queries are not overlap, with other in this test case. // however it might be better to extract this method into the separate suite // for a while let's nuke a cache content, in case of repetitions + SolrCache cache = (SolrCache)h.getCore().getInfoRegistry().get("queryResultCache"); cache.clear(); } @@ -264,32 +268,32 @@ private ScoreMode not(ScoreMode s) { return l.get(r.nextInt(l.size())); } - private void assertInsert(SolrCache cache, final NamedList statPre) { + private void assertInsert(Map current, final Map statPre) { assertEquals("it lookups", 1, - delta("lookups", cache.getStatistics(), statPre)); - assertEquals("it doesn't hit", 0, delta("hits", cache.getStatistics(), statPre)); + delta("lookups", current, statPre)); + assertEquals("it doesn't hit", 0, delta("hits", current, statPre)); assertEquals("it inserts", 1, - delta("inserts", cache.getStatistics(), statPre)); + delta("inserts", current, statPre)); } - private void assertHit(SolrCache cache, final NamedList statPre) { + private void assertHit(Map current, final Map statPre) { assertEquals("it lookups", 1, - delta("lookups", cache.getStatistics(), statPre)); - assertEquals("it hits", 1, delta("hits", cache.getStatistics(), statPre)); + delta("lookups", current, statPre)); + assertEquals("it hits", 1, delta("hits", current, statPre)); assertEquals("it doesn't insert", 0, - delta("inserts", cache.getStatistics(), statPre)); + delta("inserts", current, statPre)); } - private void assertHitOrInsert(SolrCache cache, final NamedList statPre) { + private void assertHitOrInsert(Map current, final Map statPre) { assertEquals("it lookups", 1, - delta("lookups", cache.getStatistics(), statPre)); - final long mayHit = delta("hits", cache.getStatistics(), statPre); + delta("lookups", current, statPre)); + final long mayHit = delta("hits", current, statPre); assertTrue("it may hit", 0 == mayHit || 1 == mayHit); assertEquals("or insert on cold", 1, - delta("inserts", cache.getStatistics(), statPre) + mayHit); + delta("inserts", current, statPre) + mayHit); } - private long delta(String key, NamedList a, NamedList b) { + private long delta(String key, Map a, Map b) { return (Long) a.get(key) - (Long) b.get(key); } diff --git a/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorWithCollapseTest.java b/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorWithCollapseTest.java new file mode 100644 index 000000000000..f985b418c5bc --- /dev/null +++ b/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorWithCollapseTest.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.spelling; + +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.common.params.CommonParams; +import org.apache.solr.common.params.SpellingParams; +import org.apache.solr.handler.component.SpellCheckComponent; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class SpellCheckCollatorWithCollapseTest extends SolrTestCaseJ4 { + @BeforeClass + public static void beforeClass() throws Exception { + initCore("solrconfig-collapseqparser.xml", "schema11.xml"); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + clearIndex(); + assertU(commit()); + } + + @Test + public void test() throws Exception { + for(int i=0 ; i<200 ; i++) { + String[] doc = {"id","" + i, "group_i", "" + (i % 10), "a_s", ((i%2)==0 ? "love" : "peace")}; + assertU(adoc(doc)); + if(i%5==0) { + assertU(commit()); + } + } + assertU(commit()); + assertQ( + req( + SpellCheckComponent.COMPONENT_NAME, "true", + SpellCheckComponent.SPELLCHECK_DICT, "direct", + SpellingParams.SPELLCHECK_COUNT, "10", + SpellingParams.SPELLCHECK_COLLATE, "true", + SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "5", + SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1", + CommonParams.Q, "a_s:lpve", + CommonParams.QT, "spellCheckCompRH_Direct", + SpellingParams.SPELLCHECK_COLLATE_MAX_COLLECT_DOCS, "5", + CommonParams.FQ, "{!collapse field=group_i}", + "expand", "true"), + "//lst[@name='spellcheck']/lst[@name='collations']/str[@name='collation']='a_s:love'"); + } + +} diff --git a/solr/core/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java b/solr/core/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java index e91d762f5d42..534793fbc384 100644 --- a/solr/core/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java +++ b/solr/core/src/test/org/apache/solr/store/blockcache/BufferStoreTest.java @@ -17,9 +17,12 @@ package org.apache.solr.store.blockcache; import java.math.BigDecimal; +import java.util.Map; import org.apache.lucene.util.LuceneTestCase; -import org.apache.solr.common.util.NamedList; +import org.apache.lucene.util.TestUtil; +import org.apache.solr.metrics.MetricsMap; +import org.apache.solr.metrics.SolrMetricManager; import org.junit.Before; import org.junit.Test; @@ -27,12 +30,18 @@ public class BufferStoreTest extends LuceneTestCase { private final static int blockSize = 1024; private Metrics metrics; + private MetricsMap metricsMap; private Store store; @Before public void setup() { metrics = new Metrics(); + SolrMetricManager metricManager = new SolrMetricManager(); + String registry = TestUtil.randomSimpleString(random(), 2, 10); + String scope = TestUtil.randomSimpleString(random(), 2, 10); + metrics.initializeMetrics(metricManager, registry, scope); + metricsMap = (MetricsMap) metricManager.registry(registry).getMetrics().get("CACHE." 
+ scope + ".hdfsBlockCache"); BufferStore.initNewBuffer(blockSize, blockSize, metrics); store = BufferStore.instance(blockSize); } @@ -77,7 +86,7 @@ private void assertRawMetricCounts(int allocated, int lost) { * whether buffers should have been lost since the last call */ private void assertGaugeMetricsChanged(boolean allocated, boolean lost) { - NamedList stats = metrics.getStatistics(); + Map stats = metricsMap.getValue(); assertEquals("Buffer allocation metric not updating correctly.", allocated, isMetricPositive(stats, "buffercache.allocations")); @@ -85,7 +94,7 @@ private void assertGaugeMetricsChanged(boolean allocated, boolean lost) { lost, isMetricPositive(stats, "buffercache.lost")); } - private boolean isMetricPositive(NamedList stats, String metric) { + private boolean isMetricPositive(Map stats, String metric) { return new BigDecimal(stats.get(metric).toString()).compareTo(BigDecimal.ZERO) > 0; } diff --git a/solr/core/src/test/org/apache/solr/update/VersionInfoTest.java b/solr/core/src/test/org/apache/solr/update/VersionInfoTest.java index 6bccd954b672..e8a85bdcebe5 100644 --- a/solr/core/src/test/org/apache/solr/update/VersionInfoTest.java +++ b/solr/core/src/test/org/apache/solr/update/VersionInfoTest.java @@ -101,7 +101,7 @@ protected void testMaxVersionLogic(SolrQueryRequest req) throws Exception { versionFromTLog, versionFromIndex); // reload the core, which should reset the max - CoreContainer coreContainer = req.getCore().getCoreDescriptor().getCoreContainer(); + CoreContainer coreContainer = req.getCore().getCoreContainer(); coreContainer.reload(req.getCore().getName()); maxVersionFromIndex = ulog.getMaxVersionFromIndex(); assertEquals("max version from index should be equal to version of last doc added after reload", diff --git a/solr/core/src/test/org/apache/solr/update/processor/ClassificationUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/ClassificationUpdateProcessorTest.java index 432bb02c68b3..aa536a77d9a6 100644 --- a/solr/core/src/test/org/apache/solr/update/processor/ClassificationUpdateProcessorTest.java +++ b/solr/core/src/test/org/apache/solr/update/processor/ClassificationUpdateProcessorTest.java @@ -99,7 +99,7 @@ public void classificationMonoClass_predictedClassFieldSet_shouldAssignClassInPr updateProcessorToTest=new ClassificationUpdateProcessor(params,mockProcessor,reader,req().getSchema()); updateProcessorToTest.processAdd(update); - assertThat(unseenDocument1.getFieldValue(PREDICTED_CLASS),is("class1")); + assertThat(unseenDocument1.getFieldValue(PREDICTED_CLASS),is("class2")); } @Test @@ -119,7 +119,7 @@ public void knnMonoClass_sampleParams_shouldAssignCorrectClass() throws Exceptio updateProcessorToTest=new ClassificationUpdateProcessor(params,mockProcessor,reader,req().getSchema()); updateProcessorToTest.processAdd(update); - assertThat(unseenDocument1.getFieldValue(TRAINING_CLASS),is("class1")); + assertThat(unseenDocument1.getFieldValue(TRAINING_CLASS),is("class2")); } @Test diff --git a/solr/core/src/test/org/apache/solr/util/MockCoreContainer.java b/solr/core/src/test/org/apache/solr/util/MockCoreContainer.java index 054415a935b2..5e8eab0f3007 100644 --- a/solr/core/src/test/org/apache/solr/util/MockCoreContainer.java +++ b/solr/core/src/test/org/apache/solr/util/MockCoreContainer.java @@ -26,7 +26,7 @@ public class MockCoreContainer extends CoreContainer { public static class MockCoreDescriptor extends CoreDescriptor { public MockCoreDescriptor() { - super(new MockCoreContainer(), "mock", Paths.get("path")); 
+ super("mock", Paths.get("path"), null, false); } } diff --git a/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java b/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java index aa02de5cdf1f..4eb11691c67e 100644 --- a/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java +++ b/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java @@ -18,6 +18,7 @@ package org.apache.solr.util.stats; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -45,7 +46,11 @@ public void testSolrTimerGetSnapshot() { timer.update(Math.abs(random().nextInt()) + 1, TimeUnit.NANOSECONDS); } // obtain timer metrics - NamedList lst = new NamedList(MetricUtils.convertTimer(timer, false)); + Map map = new HashMap<>(); + MetricUtils.convertTimer("", timer, MetricUtils.PropertyFilter.ALL, false, false, (k, v) -> { + map.putAll((Map)v); + }); + NamedList lst = new NamedList(map); // check that expected metrics were obtained assertEquals(14, lst.size()); final Snapshot snapshot = timer.getSnapshot(); @@ -84,7 +89,7 @@ public void testMetrics() throws Exception { Gauge error = () -> {throw new InternalError("Memory Pool not found error");}; registry.register("memory.expected.error", error); MetricUtils.toMaps(registry, Collections.singletonList(MetricFilter.ALL), MetricFilter.ALL, - false, false, false, (k, o) -> { + MetricUtils.PropertyFilter.ALL, false, false, false, false, (k, o) -> { Map v = (Map)o; if (k.startsWith("counter")) { assertEquals(1L, v.get("count")); @@ -114,7 +119,7 @@ public void testMetrics() throws Exception { }); // test compact format MetricUtils.toMaps(registry, Collections.singletonList(MetricFilter.ALL), MetricFilter.ALL, - false, false, true, (k, o) -> { + MetricUtils.PropertyFilter.ALL, false, false, true, false, (k, o) -> { if (k.startsWith("counter")) { assertTrue(o instanceof Long); assertEquals(1L, o); diff --git a/solr/licenses/metrics-core-3.1.2.jar.sha1 b/solr/licenses/metrics-core-3.1.2.jar.sha1 deleted file mode 100644 index ca9ac38c3a16..000000000000 --- a/solr/licenses/metrics-core-3.1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -224f03afd2521c6c94632f566beb1bb5ee32cf07 diff --git a/solr/licenses/metrics-core-3.2.2.jar.sha1 b/solr/licenses/metrics-core-3.2.2.jar.sha1 new file mode 100644 index 000000000000..d14a04ef7a71 --- /dev/null +++ b/solr/licenses/metrics-core-3.2.2.jar.sha1 @@ -0,0 +1 @@ +cd9886f498ee2ab2d994f0c779e5553b2c450416 diff --git a/solr/licenses/metrics-ganglia-3.1.2.jar.sha1 b/solr/licenses/metrics-ganglia-3.1.2.jar.sha1 deleted file mode 100644 index 337754bf00af..000000000000 --- a/solr/licenses/metrics-ganglia-3.1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a4e2fcd6436f9b1771f0f9b6bab445dddcf704f diff --git a/solr/licenses/metrics-ganglia-3.2.2.jar.sha1 b/solr/licenses/metrics-ganglia-3.2.2.jar.sha1 new file mode 100644 index 000000000000..e5d8496ca500 --- /dev/null +++ b/solr/licenses/metrics-ganglia-3.2.2.jar.sha1 @@ -0,0 +1 @@ +d5bb1883e9b0daf0e4187e558746f5058f4585c1 diff --git a/solr/licenses/metrics-graphite-3.1.2.jar.sha1 b/solr/licenses/metrics-graphite-3.1.2.jar.sha1 deleted file mode 100644 index 34f01615d95b..000000000000 --- a/solr/licenses/metrics-graphite-3.1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -15a68399652c6123fe6e4c82ac4f0749e2eb6583 diff --git a/solr/licenses/metrics-graphite-3.2.2.jar.sha1 b/solr/licenses/metrics-graphite-3.2.2.jar.sha1 new file mode 100644 index 000000000000..5d11db4d518c --- /dev/null +++ 
b/solr/licenses/metrics-graphite-3.2.2.jar.sha1 @@ -0,0 +1 @@ +908e8cbec1bbdb2f4023334e424c7de2832a95af diff --git a/solr/licenses/metrics-jetty9-3.1.2.jar.sha1 b/solr/licenses/metrics-jetty9-3.1.2.jar.sha1 deleted file mode 100644 index 0722b0b42ece..000000000000 --- a/solr/licenses/metrics-jetty9-3.1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2fe1039424ca687bea5d09ec0bfa372bf7d062 diff --git a/solr/licenses/metrics-jetty9-3.2.2.jar.sha1 b/solr/licenses/metrics-jetty9-3.2.2.jar.sha1 new file mode 100644 index 000000000000..92d35089db6d --- /dev/null +++ b/solr/licenses/metrics-jetty9-3.2.2.jar.sha1 @@ -0,0 +1 @@ +3fc94d99f41dc3f5be5483c81828138104df4449 diff --git a/solr/licenses/metrics-jvm-3.1.2.jar.sha1 b/solr/licenses/metrics-jvm-3.1.2.jar.sha1 deleted file mode 100644 index 519fcddfc4d4..000000000000 --- a/solr/licenses/metrics-jvm-3.1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed364e77218e50fdcdebce4d982cb4d1f4a8c187 diff --git a/solr/licenses/metrics-jvm-3.2.2.jar.sha1 b/solr/licenses/metrics-jvm-3.2.2.jar.sha1 new file mode 100644 index 000000000000..0c02f93213b8 --- /dev/null +++ b/solr/licenses/metrics-jvm-3.2.2.jar.sha1 @@ -0,0 +1 @@ +9cbf2030242f7ffb97fae23f8a81421eb8d4ad45 diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java index 4c6dd51a7814..ac388d2c9cd4 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java @@ -16,7 +16,6 @@ */ package org.apache.solr.client.solrj.impl; -import java.io.Closeable; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.net.ConnectException; @@ -393,7 +392,7 @@ public CloudSolrClient(Collection zkHosts, String chroot, HttpClient htt */ @Deprecated public CloudSolrClient(Collection zkHosts, String chroot, HttpClient httpClient, LBHttpSolrClient lbSolrClient, boolean updatesToLeaders) { - this(zkHosts, chroot, httpClient, lbSolrClient, null, updatesToLeaders, false, null); + this(zkHosts, chroot, null, httpClient, lbSolrClient, null, updatesToLeaders, false, null); } /** @@ -407,9 +406,14 @@ public CloudSolrClient(Collection zkHosts, String chroot, HttpClient htt * each host in the zookeeper ensemble. Note that with certain * Collection types like HashSet, the order of hosts in the final * connect string may not be in the same order you added them. + * Provide only one of solrUrls or zkHosts. * @param chroot * A chroot value for zookeeper, starting with a forward slash. If no * chroot is required, use null. + * @param solrUrls + * A list of Solr URLs to configure the underlying {@link HttpClusterStateProvider}, which will + * use of the these URLs to fetch the list of live nodes for this Solr cluster. Provide only + * one of solrUrls or zkHosts. * @param httpClient * the {@link HttpClient} instance to be used for all requests. The provided httpClient should use a * multi-threaded connection manager. If null, a default HttpClient will be used. 
@@ -424,6 +428,7 @@ public CloudSolrClient(Collection zkHosts, String chroot, HttpClient htt */ private CloudSolrClient(Collection zkHosts, String chroot, + List solrUrls, HttpClient httpClient, LBHttpSolrClient lbSolrClient, LBHttpSolrClient.Builder lbHttpSolrClientBuilder, @@ -433,7 +438,21 @@ private CloudSolrClient(Collection zkHosts, ) { if (stateProvider == null) { - this.stateProvider = new ZkClientClusterStateProvider(zkHosts, chroot); + if (zkHosts != null && solrUrls != null) { + throw new IllegalArgumentException("Both zkHost(s) & solrUrl(s) have been specified. Only specify one."); + } + if (zkHosts != null) { + this.stateProvider = new ZkClientClusterStateProvider(zkHosts, chroot); + } else if (solrUrls != null && !solrUrls.isEmpty()) { + try { + this.stateProvider = new HttpClusterStateProvider(solrUrls, httpClient); + } catch (Exception e) { + throw new RuntimeException("Couldn't initialize a HttpClusterStateProvider (is/are the " + + "Solr server(s), " + solrUrls + ", down?)", e); + } + } else { + throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null."); + } } else { this.stateProvider = stateProvider; } @@ -1259,7 +1278,7 @@ protected NamedList sendRequest(SolrRequest request, String collection) Set liveNodes = stateProvider.liveNodes(); for (String liveNode : liveNodes) { theUrlList.add(ZkStateReader.getBaseUrlForNodeName(liveNode, - (String) stateProvider.getClusterProperties().getOrDefault(ZkStateReader.URL_SCHEME,"http"))); + (String) stateProvider.getClusterProperty(ZkStateReader.URL_SCHEME,"http"))); } } else { @@ -1365,7 +1384,7 @@ protected NamedList sendRequest(SolrRequest request, String collection) return rsp.getResponse(); } - Set getCollectionNames(String collection) { + private Set getCollectionNames(String collection) { // Extract each comma separated collection name and store in a List. List rawCollectionsList = StrUtils.splitSmart(collection, ",", true); Set collectionNames = new HashSet<>(); @@ -1602,6 +1621,7 @@ private static LBHttpSolrClient createLBHttpSolrClient(HttpClient httpClient) { */ public static class Builder { private Collection zkHosts; + private List solrUrls; private HttpClient httpClient; private String zkChroot; private LBHttpSolrClient loadBalancedSolrClient; @@ -1613,6 +1633,7 @@ public static class Builder { public Builder() { this.zkHosts = new ArrayList(); + this.solrUrls = new ArrayList(); this.shardLeadersOnly = true; } @@ -1629,7 +1650,28 @@ public Builder withZkHost(String zkHost) { this.zkHosts.add(zkHost); return this; } + + /** + * Provide a Solr URL to be used when configuring {@link CloudSolrClient} instances. + * + * Method may be called multiple times. One of the provided values will be used to fetch + * the list of live Solr nodes that the underlying {@link HttpClusterStateProvider} would be maintaining. + */ + public Builder withSolrUrl(String solrUrl) { + this.solrUrls.add(solrUrl); + return this; + } + /** + * Provide a list of Solr URL to be used when configuring {@link CloudSolrClient} instances. + * One of the provided values will be used to fetch the list of live Solr + * nodes that the underlying {@link HttpClusterStateProvider} would be maintaining. + */ + public Builder withSolrUrl(Collection solrUrls) { + this.solrUrls.addAll(solrUrls); + return this; + } + /** * Provides a {@link HttpClient} for the builder to use when creating clients. 
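The Builder additions above make it possible to bootstrap a CloudSolrClient from one or more Solr base URLs instead of a ZooKeeper connect string; in that case the client is backed by the HttpClusterStateProvider added later in this patch. A minimal usage sketch, with placeholder addresses, and remembering the javadoc note that only one of solrUrls or zkHosts should be supplied:

    // Sketch only: the URL below is a placeholder for a live Solr node.
    CloudSolrClient client = new CloudSolrClient.Builder()
        .withSolrUrl("http://localhost:8983/solr")   // may be called repeatedly to add more URLs
        .build();

    // The ZooKeeper-based form is unchanged; per the javadoc above, provide only
    // one of zkHost(s) or solrUrl(s) to a given builder.
    CloudSolrClient zkClient = new CloudSolrClient.Builder()
        .withZkHost("localhost:2181")                // placeholder ZooKeeper address
        .build();
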
*/ @@ -1722,25 +1764,22 @@ public Builder withClusterStateProvider(ClusterStateProvider stateProvider) { */ public CloudSolrClient build() { if (stateProvider == null) { - stateProvider = new ZkClientClusterStateProvider(zkHosts, zkChroot); + if (!zkHosts.isEmpty()) { + stateProvider = new ZkClientClusterStateProvider(zkHosts, zkChroot); + } + else if (!this.solrUrls.isEmpty()) { + try { + stateProvider = new HttpClusterStateProvider(solrUrls, httpClient); + } catch (Exception e) { + throw new RuntimeException("Couldn't initialize a HttpClusterStateProvider (is/are the " + + "Solr server(s), " + solrUrls + ", down?)", e); + } + } else { + throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null."); + } } - return new CloudSolrClient(zkHosts, zkChroot, httpClient, loadBalancedSolrClient, lbClientBuilder, + return new CloudSolrClient(zkHosts, zkChroot, solrUrls, httpClient, loadBalancedSolrClient, lbClientBuilder, shardLeadersOnly, directUpdatesToLeadersOnly, stateProvider); } } - - interface ClusterStateProvider extends Closeable { - - ClusterState.CollectionRef getState(String collection); - - Set liveNodes(); - - String getAlias(String collection); - - String getCollectionName(String name); - - Map getClusterProperties(); - - void connect(); - } } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java new file mode 100644 index 000000000000..b913cd4642fb --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.client.solrj.impl; + +import java.io.Closeable; +import java.util.Set; + +import org.apache.solr.common.cloud.ClusterState; + +public interface ClusterStateProvider extends Closeable { + + /** + * Obtain the state of the collection (cluster status). + * @return the collection state, or null is collection doesn't exist + */ + ClusterState.CollectionRef getState(String collection); + + /** + * Obtain set of live_nodes for the cluster. + */ + Set liveNodes(); + + /** + * Given an alias, returns the collection name that this alias points to + */ + String getAlias(String alias); + + /** + * Given a name, returns the collection name if an alias by that name exists, or + * returns the name itself, if no alias exists. + */ + String getCollectionName(String name); + + /** + * Obtain a cluster property, or null if it doesn't exist. + */ + Object getClusterProperty(String propertyName); + + /** + * Obtain a cluster property, or the default value if it doesn't exist. 
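The interface above replaces the former bulk getClusterProperties() accessor with per-property lookups, which is what CloudSolrClient.sendRequest now relies on when resolving the URL scheme. A short sketch of that pattern, with nodeName standing in for a live node name:

    // Look up a single cluster property with a fallback instead of fetching the whole map.
    String urlScheme = (String) stateProvider.getClusterProperty(ZkStateReader.URL_SCHEME, "http");
    String baseUrl = ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme);
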
+ */ + Object getClusterProperty(String propertyName, String def); + + void connect(); +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpClusterStateProvider.java new file mode 100644 index 000000000000..1fb941508940 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpClusterStateProvider.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.client.solrj.impl; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import org.apache.http.client.HttpClient; +import org.apache.solr.client.solrj.SolrClient; +import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException; +import org.apache.solr.client.solrj.request.CollectionAdminRequest; +import org.apache.solr.client.solrj.request.QueryRequest; +import org.apache.solr.common.cloud.ClusterState; +import org.apache.solr.common.cloud.ClusterState.CollectionRef; +import org.apache.solr.common.cloud.ZkStateReader; +import org.apache.solr.common.params.ModifiableSolrParams; +import org.apache.solr.common.util.NamedList; +import org.apache.solr.common.util.SimpleOrderedMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HttpClusterStateProvider implements ClusterStateProvider { + private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + private String urlScheme; + volatile Set liveNodes; + long liveNodesTimestamp = 0; + volatile Map aliases; + long aliasesTimestamp = 0; + + private int cacheTimeout = 5; // the liveNodes and aliases cache will be invalidated after 5 secs + final HttpClient httpClient; + final boolean clientIsInternal; + + public HttpClusterStateProvider(List solrUrls, HttpClient httpClient) throws Exception { + this.httpClient = httpClient == null? HttpClientUtil.createClient(null): httpClient; + this.clientIsInternal = httpClient == null; + for (String solrUrl: solrUrls) { + urlScheme = solrUrl.startsWith("https")? 
"https": "http"; + try (SolrClient initialClient = new HttpSolrClient.Builder().withBaseSolrUrl(solrUrl).withHttpClient(httpClient).build()) { + Set liveNodes = fetchLiveNodes(initialClient); // throws exception if unable to fetch + this.liveNodes = liveNodes; + liveNodesTimestamp = System.nanoTime(); + break; + } catch (IOException e) { + log.warn("Attempt to fetch live_nodes from " + solrUrl + " failed.", e); + } + } + + if (this.liveNodes == null || this.liveNodes.isEmpty()) { + throw new RuntimeException("Tried fetching live_nodes using Solr URLs provided, i.e. " + solrUrls + ". However, " + + "succeeded in obtaining the cluster state from none of them." + + "If you think your Solr cluster is up and is accessible," + + " you could try re-creating a new CloudSolrClient using working" + + " solrUrl(s) or zkHost(s)."); + } + } + + @Override + public void close() throws IOException { + if (this.clientIsInternal && this.httpClient != null) { + HttpClientUtil.close(httpClient); + } + } + + @Override + public CollectionRef getState(String collection) { + for (String nodeName: liveNodes) { + try (HttpSolrClient client = new HttpSolrClient.Builder(). + withBaseSolrUrl(ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme)). + withHttpClient(httpClient).build()) { + ClusterState cs = fetchClusterState(client, collection); + return cs.getCollectionRef(collection); + } catch (SolrServerException | RemoteSolrException | IOException e) { + if (e.getMessage().contains(collection + " not found")) { + // Cluster state for the given collection was not found. + // Lets fetch/update our aliases: + getAliases(true); + return null; + } + log.warn("Attempt to fetch cluster state from " + + ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme) + " failed.", e); + } + } + throw new RuntimeException("Tried fetching cluster state using the node names we knew of, i.e. " + liveNodes +". However, " + + "succeeded in obtaining the cluster state from none of them." + + "If you think your Solr cluster is up and is accessible," + + " you could try re-creating a new CloudSolrClient using working" + + " solrUrl(s) or zkHost(s)."); + } + + @SuppressWarnings({"rawtypes", "unchecked"}) + private ClusterState fetchClusterState(SolrClient client, String collection) throws SolrServerException, IOException { + ModifiableSolrParams params = new ModifiableSolrParams(); + params.set("collection", collection); + params.set("action", "CLUSTERSTATUS"); + QueryRequest request = new QueryRequest(params); + request.setPath("/admin/collections"); + NamedList cluster = (SimpleOrderedMap) client.request(request).get("cluster"); + Map collectionsMap = Collections.singletonMap(collection, + ((NamedList) cluster.get("collections")).get(collection)); + int znodeVersion = (int)((Map)(collectionsMap).get(collection)).get("znodeVersion"); + Set liveNodes = new HashSet((List)(cluster.get("live_nodes"))); + this.liveNodes = liveNodes; + liveNodesTimestamp = System.nanoTime(); + ClusterState cs = ClusterState.load(znodeVersion, collectionsMap, liveNodes, ZkStateReader.CLUSTER_STATE); + return cs; + } + + @Override + public Set liveNodes() { + if (liveNodes == null) { + throw new RuntimeException("We don't know of any live_nodes to fetch the" + + " latest live_nodes information from. 
" + + "If you think your Solr cluster is up and is accessible," + + " you could try re-creating a new CloudSolrClient using working" + + " solrUrl(s) or zkHost(s)."); + } + if (TimeUnit.SECONDS.convert((System.nanoTime() - liveNodesTimestamp), TimeUnit.NANOSECONDS) > getCacheTimeout()) { + for (String nodeName: liveNodes) { + try (HttpSolrClient client = new HttpSolrClient.Builder(). + withBaseSolrUrl(ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme)). + withHttpClient(httpClient).build()) { + Set liveNodes = fetchLiveNodes(client); + this.liveNodes = (liveNodes); + liveNodesTimestamp = System.nanoTime(); + return liveNodes; + } catch (Exception e) { + log.warn("Attempt to fetch live_nodes from " + + ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme) + " failed.", e); + } + } + throw new RuntimeException("Tried fetching live_nodes using all the node names we knew of, i.e. " + liveNodes +". However, " + + "succeeded in obtaining the cluster state from none of them." + + "If you think your Solr cluster is up and is accessible," + + " you could try re-creating a new CloudSolrClient using working" + + " solrUrl(s) or zkHost(s)."); + } else { + return liveNodes; // cached copy is fresh enough + } + } + + private static Set fetchLiveNodes(SolrClient client) throws Exception { + ModifiableSolrParams params = new ModifiableSolrParams(); + params.set("action", "CLUSTERSTATUS"); + QueryRequest request = new QueryRequest(params); + request.setPath("/admin/collections"); + NamedList cluster = (SimpleOrderedMap) client.request(request).get("cluster"); + Set liveNodes = new HashSet((List)(cluster.get("live_nodes"))); + return liveNodes; + } + + @Override + public String getAlias(String alias) { + Map aliases = getAliases(false); + return aliases.get(alias); + } + + private Map getAliases(boolean forceFetch) { + if (this.liveNodes == null) { + throw new RuntimeException("We don't know of any live_nodes to fetch the" + + " latest aliases information from. " + + "If you think your Solr cluster is up and is accessible," + + " you could try re-creating a new CloudSolrClient using working" + + " solrUrl(s) or zkHost(s)."); + } + + if (forceFetch || this.aliases == null || + TimeUnit.SECONDS.convert((System.nanoTime() - aliasesTimestamp), TimeUnit.NANOSECONDS) > getCacheTimeout()) { + for (String nodeName: liveNodes) { + try (HttpSolrClient client = new HttpSolrClient.Builder(). + withBaseSolrUrl(ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme)). + withHttpClient(httpClient).build()) { + + Map aliases = new CollectionAdminRequest.ListAliases().process(client).getAliases(); + this.aliases = aliases; + this.aliasesTimestamp = System.nanoTime(); + return Collections.unmodifiableMap(aliases); + } catch (SolrServerException | RemoteSolrException | IOException e) { + // Situation where we're hitting an older Solr which doesn't have LISTALIASES + if (e instanceof RemoteSolrException && ((RemoteSolrException)e).code()==400) { + log.warn("LISTALIASES not found, possibly using older Solr server. Aliases won't work" + + " unless you re-create the CloudSolrClient using zkHost(s) or upgrade Solr server", e); + this.aliases = Collections.emptyMap(); + this.aliasesTimestamp = System.nanoTime(); + return aliases; + } + log.warn("Attempt to fetch cluster state from " + + ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme) + " failed.", e); + } + } + + throw new RuntimeException("Tried fetching aliases using all the node names we knew of, i.e. " + liveNodes +". 
However, " + + "succeeded in obtaining the cluster state from none of them." + + "If you think your Solr cluster is up and is accessible," + + " you could try re-creating a new CloudSolrClient using a working" + + " solrUrl or zkHost."); + } else { + return Collections.unmodifiableMap(this.aliases); // cached copy is fresh enough + } + } + + @Override + public String getCollectionName(String name) { + Map aliases = getAliases(false); + return aliases.containsKey(name) ? aliases.get(name): name; + } + + @Override + public Object getClusterProperty(String propertyName) { + if (propertyName.equals(ZkStateReader.URL_SCHEME)) { + return this.urlScheme; + } + throw new UnsupportedOperationException("Fetching cluster properties not supported" + + " using the HttpClusterStateProvider. " + + "ZkClientClusterStateProvider can be used for this."); // TODO + } + + @Override + public Object getClusterProperty(String propertyName, String def) { + if (propertyName.equals(ZkStateReader.URL_SCHEME)) { + return this.urlScheme; + } + throw new UnsupportedOperationException("Fetching cluster properties not supported" + + " using the HttpClusterStateProvider. " + + "ZkClientClusterStateProvider can be used for this."); // TODO + } + + @Override + public void connect() {} + + public int getCacheTimeout() { + return cacheTimeout; + } + + public void setCacheTimeout(int cacheTimeout) { + this.cacheTimeout = cacheTimeout; + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java index b0f1a505d0d5..ed6ae7b99b25 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java @@ -360,10 +360,11 @@ public Rsp request(Req req) throws SolrServerException, IOException { boolean isNonRetryable = req.request instanceof IsUpdateRequest || ADMIN_PATHS.contains(req.request.getPath()); List skipped = null; + boolean timeAllowedExceeded = false; long timeAllowedNano = getTimeAllowedInNanos(req.getRequest()); long timeOutTime = System.nanoTime() + timeAllowedNano; for (String serverStr : req.getServers()) { - if(isTimeExceeded(timeAllowedNano, timeOutTime)) { + if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) { break; } @@ -400,7 +401,7 @@ else if (skipped.size() < numDeadServersToTry) { // try the servers we previously skipped if (skipped != null) { for (ServerWrapper wrapper : skipped) { - if(isTimeExceeded(timeAllowedNano, timeOutTime)) { + if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) { break; } @@ -417,10 +418,16 @@ else if (skipped.size() < numDeadServersToTry) { } + final String solrServerExceptionMessage; + if (timeAllowedExceeded) { + solrServerExceptionMessage = "Time allowed to handle this request exceeded"; + } else { + solrServerExceptionMessage = "No live SolrServers available to handle this request"; + } if (ex == null) { - throw new SolrServerException("No live SolrServers available to handle this request"); + throw new SolrServerException(solrServerExceptionMessage); } else { - throw new SolrServerException("No live SolrServers available to handle this request:" + zombieServers.keySet(), ex); + throw new SolrServerException(solrServerExceptionMessage+":" + zombieServers.keySet(), ex); } } @@ -593,10 +600,11 @@ public NamedList request(final SolrRequest request, String collection) int maxTries = serverList.length; Map justFailed = null; + 
boolean timeAllowedExceeded = false; long timeAllowedNano = getTimeAllowedInNanos(request); long timeOutTime = System.nanoTime() + timeAllowedNano; for (int attempts=0; attempts request(final SolrRequest request, String collection) // try other standard servers that we didn't try just now for (ServerWrapper wrapper : zombieServers.values()) { - if(isTimeExceeded(timeAllowedNano, timeOutTime)) { + if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) { break; } @@ -651,10 +659,16 @@ public NamedList request(final SolrRequest request, String collection) } + final String solrServerExceptionMessage; + if (timeAllowedExceeded) { + solrServerExceptionMessage = "Time allowed to handle this request exceeded"; + } else { + solrServerExceptionMessage = "No live SolrServers available to handle this request"; + } if (ex == null) { - throw new SolrServerException("No live SolrServers available to handle this request"); + throw new SolrServerException(solrServerExceptionMessage); } else { - throw new SolrServerException("No live SolrServers available to handle this request", ex); + throw new SolrServerException(solrServerExceptionMessage, ex); } } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java index 8ed1b5c45aa9..c9972894d3b7 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; -public class ZkClientClusterStateProvider implements CloudSolrClient.ClusterStateProvider { +public class ZkClientClusterStateProvider implements ClusterStateProvider { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -63,14 +63,24 @@ public Set liveNodes() { @Override - public String getAlias(String collection) { + public String getAlias(String alias) { Aliases aliases = zkStateReader.getAliases(); - return aliases.getCollectionAlias(collection); + return aliases.getCollectionAlias(alias); } @Override - public Map getClusterProperties() { - return zkStateReader.getClusterProperties(); + public Object getClusterProperty(String propertyName) { + Map props = zkStateReader.getClusterProperties(); + return props.get(propertyName); + } + + @Override + public Object getClusterProperty(String propertyName, String def) { + Map props = zkStateReader.getClusterProperties(); + if (props.containsKey(propertyName)) { + return props.get(propertyName); + } + return def; } @Override diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConversionEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConversionEvaluator.java new file mode 100644 index 000000000000..2849b49aa886 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConversionEvaluator.java @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.util.Locale; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +public class ConversionEvaluator extends ComplexEvaluator { + + enum LENGTH_CONSTANT {MILES, YARDS, FEET, INCHES, MILLIMETERS, CENTIMETERS, METERS, KILOMETERS}; + + private LENGTH_CONSTANT from; + private LENGTH_CONSTANT to; + private Convert convert; + + public ConversionEvaluator(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + + if (3 != subEvaluators.size()) { + throw new EvaluatorException(new IOException(String.format(Locale.ROOT, "Invalid expression %s - expecting 3 value but found %d", expression, subEvaluators.size()))); + } + + try { + from = LENGTH_CONSTANT.valueOf(subEvaluators.get(0).toExpression(factory).toString().toUpperCase(Locale.ROOT)); + to = LENGTH_CONSTANT.valueOf(subEvaluators.get(1).toExpression(factory).toString().toUpperCase(Locale.ROOT)); + this.convert = getConvert(from, to); + } catch (IllegalArgumentException e) { + throw new EvaluatorException(e); + } + } + + private String listParams() { + StringBuffer buf = new StringBuffer(); + for(LENGTH_CONSTANT lc : LENGTH_CONSTANT.values()) { + if(buf.length() > 0) { + buf.append(", "); + } + buf.append(lc.toString()); + } + return buf.toString(); + } + + @Override + public Object evaluate(Tuple tuple) throws IOException { + + StreamEvaluator streamEvaluator = subEvaluators.get(2); + Object tupleValue = streamEvaluator.evaluate(tuple); + + if (tupleValue == null) return null; + + Number number = (Number)tupleValue; + double d = number.doubleValue(); + return convert.convert(d); + } + + private Convert getConvert(LENGTH_CONSTANT from, LENGTH_CONSTANT to) throws IOException { + switch(from) { + case INCHES: + switch(to) { + case MILLIMETERS: + return (double d) -> d*25.4; + case CENTIMETERS: + return (double d) -> d*2.54; + case METERS: + return (double d) -> d*0.0254; + default: + throw new EvaluatorException("No conversion available from "+from+" to "+to); + } + case FEET: + switch(to) { + case METERS: + return (double d) -> d * .30; + } + case YARDS: + switch(to) { + case METERS: + return (double d) -> d * .91; + case KILOMETERS: + return (double d) -> d * 0.00091; + default: + throw new EvaluatorException("No conversion available from "+from+" to "+to); + } + case MILES: + switch(to) { + case KILOMETERS: + return (double d) -> d * 1.61; + default: + throw new EvaluatorException("No conversion available from "+from+" to "+to); + } + case MILLIMETERS: + switch (to) { + case INCHES: + return (double d) -> d * 0.039; + default: + throw new EvaluatorException("No conversion available from "+from+" to "+to); + } + case CENTIMETERS: + switch(to) { + case INCHES: + return (double d) -> d * 0.39; + default: + throw new 
EvaluatorException("No conversion available from "+from+" to "+to); + } + case METERS: + switch(to) { + case FEET: + return (double d) -> d * 3.28; + default: + throw new EvaluatorException("No conversion available from "+from+" to "+to); + } + case KILOMETERS: + switch(to) { + case MILES: + return (double d) -> d * 0.62; + case FEET: + return (double d) -> d * 3280.8; + } + default: + throw new EvaluatorException("No conversion available from "+from); + } + } + + private interface Convert { + public double convert(double d); + } + + @Override + public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException { + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + + for (StreamEvaluator evaluator : subEvaluators) { + expression.addParameter(evaluator.toExpression(factory)); + } + + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + return new Explanation(nodeId.toString()) + .withExpressionType(Explanation.ExpressionType.EVALUATOR) + .withImplementingClass(getClass().getName()) + .withExpression(toExpression(factory).toString()); + } +} \ No newline at end of file diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/package-info.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/EvaluatorException.java similarity index 75% rename from lucene/grouping/src/java/org/apache/lucene/search/grouping/function/package-info.java rename to solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/EvaluatorException.java index 73588ce24639..d2098c2e4a9c 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/package-info.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/EvaluatorException.java @@ -15,7 +15,16 @@ * limitations under the License. */ -/** - * Support for grouping by {@link org.apache.lucene.queries.function.ValueSource}. - */ -package org.apache.lucene.search.grouping.function; +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; + +public class EvaluatorException extends IOException { + public EvaluatorException(Throwable t) { + super(t); + } + + public EvaluatorException(String message) { + super(message); + } +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluator.java new file mode 100644 index 000000000000..4af1f3547113 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluator.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeParseException; +import java.time.temporal.TemporalAccessor; +import java.time.temporal.UnsupportedTemporalTypeException; +import java.util.Date; +import java.util.Locale; +import java.util.Map; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * A generic date evaluator for use with a TemporalAccessor + */ +public abstract class TemporalEvaluator extends ComplexEvaluator { + + private String field; + + public TemporalEvaluator(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + + if (1 != subEvaluators.size()) { + throw new IOException(String.format(Locale.ROOT, "Invalid expression %s - expecting one value but found %d", expression, subEvaluators.size())); + } + } + + @Override + public Object evaluate(Tuple tuple) throws IOException { + + Instant instant = null; + TemporalAccessor date = null; + + //First evaluate the parameter + StreamEvaluator streamEvaluator = subEvaluators.get(0); + Object tupleValue = streamEvaluator.evaluate(tuple); + + if (tupleValue == null) return null; + + if(field == null) { + field = streamEvaluator.toExpression(constructingFactory).toString(); + } + + Map tupleContext = streamContext.getTupleContext(); + date = (LocalDateTime)tupleContext.get(field); // Check to see if the date has already been created for this field + + if(date == null) { + if (tupleValue instanceof String) { + instant = getInstant((String) tupleValue); + } else if (tupleValue instanceof Long) { + instant = Instant.ofEpochMilli((Long) tupleValue); + } else if (tupleValue instanceof Instant) { + instant = (Instant) tupleValue; + } else if (tupleValue instanceof Date) { + instant = ((Date) tupleValue).toInstant(); + } else if (tupleValue instanceof TemporalAccessor) { + date = ((TemporalAccessor) tupleValue); + tupleContext.put(field, date); // Cache the date in the TupleContext + } + } + + if (instant != null) { + if (TemporalEvaluatorEpoch.FUNCTION_NAME.equals(getFunction())) return instant.toEpochMilli(); + date = LocalDateTime.ofInstant(instant, ZoneOffset.UTC); + tupleContext.put(field, date); // Cache the date in the TupleContext + } + + if (date != null) { + try { + return evaluateDate(date); + } catch (UnsupportedTemporalTypeException utte) { + throw new IOException(String.format(Locale.ROOT, "It is not possible to call '%s' function on %s", getFunction(), date.getClass().getName())); + } + } + + throw new IOException(String.format(Locale.ROOT, "Invalid parameter %s - The parameter must be a string formatted ISO_INSTANT or of type Long,Instant,Date,LocalDateTime or TemporalAccessor.", String.valueOf(tupleValue))); + } + + public abstract Object evaluateDate(TemporalAccessor aDate) throws IOException; + public abstract String getFunction(); + + protected Instant getInstant(String dateStr) throws IOException { + + if (dateStr != null && !dateStr.isEmpty()) { + try { + return Instant.parse(dateStr); + } catch (DateTimeParseException e) { + throw new IOException(String.format(Locale.ROOT, "Invalid parameter %s - The String must be formatted in the 
ISO_INSTANT date format.", dateStr)); + } + } + return null; + } + + @Override + public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException { + StreamExpression expression = new StreamExpression(getFunction()); + + for (StreamEvaluator evaluator : subEvaluators) { + expression.addParameter(evaluator.toExpression(factory)); + } + + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + return new Explanation(nodeId.toString()) + .withExpressionType(Explanation.ExpressionType.EVALUATOR) + .withImplementingClass(getClass().getName()) + .withExpression(toExpression(factory).toString()); + } +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDay.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDay.java new file mode 100644 index 000000000000..88b50437fe98 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDay.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a day stream evaluator + */ +public class TemporalEvaluatorDay extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "day"; + + public TemporalEvaluatorDay(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(ChronoField.DAY_OF_MONTH); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDayOfQuarter.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDayOfQuarter.java new file mode 100644 index 000000000000..f0343770b5eb --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDayOfQuarter.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.IsoFields; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a dayOfQuarter stream evaluator + */ +public class TemporalEvaluatorDayOfQuarter extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "dayOfQuarter"; + + public TemporalEvaluatorDayOfQuarter(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(IsoFields.DAY_OF_QUARTER); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDayOfYear.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDayOfYear.java new file mode 100644 index 000000000000..339f9388b7bf --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorDayOfYear.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
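Each concrete evaluator in this family only names a function and extracts one field; TemporalEvaluator does the heavy lifting of accepting Strings, longs, Instants, Dates or TemporalAccessors and caching the parsed LocalDateTime in the tuple context, so several temporal functions over the same field parse it only once. Assuming the classes are registered under their FUNCTION_NAME constants (the wiring lives outside this hunk), a sketch:

    // Assumed registration of the evaluators shown above.
    StreamFactory factory = new StreamFactory()
        .withFunctionName(TemporalEvaluatorDay.FUNCTION_NAME, TemporalEvaluatorDay.class)
        .withFunctionName(TemporalEvaluatorDayOfQuarter.FUNCTION_NAME, TemporalEvaluatorDayOfQuarter.class);

    // Both functions could then share one parsed date per tuple, e.g.:
    //   select(search(logs, q="*:*", fl="id,timestamp_dt", sort="id asc"),
    //          day(timestamp_dt) as day_i, dayOfQuarter(timestamp_dt) as doq_i)
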
+ */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a dayOfYear stream evaluator + */ +public class TemporalEvaluatorDayOfYear extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "dayOfYear"; + + public TemporalEvaluatorDayOfYear(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(ChronoField.DAY_OF_YEAR); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorEpoch.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorEpoch.java new file mode 100644 index 000000000000..a8554b3e580e --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorEpoch.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a epoch stream evaluator + */ +public class TemporalEvaluatorEpoch extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "epoch"; + + public TemporalEvaluatorEpoch(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) throws IOException { + if (aDate instanceof LocalDateTime) { + return ((LocalDateTime)aDate).atZone(ZoneOffset.UTC).toInstant().toEpochMilli(); + } + throw new IOException(String.format(Locale.ROOT, "Unsupported function '%s' called on %s", FUNCTION_NAME, aDate.toString())); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorHour.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorHour.java new file mode 100644 index 000000000000..54808704a310 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorHour.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a hour stream evaluator + */ +public class TemporalEvaluatorHour extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "hour"; + + public TemporalEvaluatorHour(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(ChronoField.HOUR_OF_DAY); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorMinute.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorMinute.java new file mode 100644 index 000000000000..9438e9d960c2 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorMinute.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a minute stream evaluator + */ +public class TemporalEvaluatorMinute extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "minute"; + + public TemporalEvaluatorMinute(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(ChronoField.MINUTE_OF_HOUR); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorMonth.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorMonth.java new file mode 100644 index 000000000000..b9a70fef827b --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorMonth.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
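The per-field evaluators are deliberately uniform, so extending the family is mostly boilerplate: subclass TemporalEvaluator, choose a function name, and map it to a ChronoField or IsoFields constant. A hypothetical year evaluator following the same pattern (not part of this patch) might look like:

    import java.io.IOException;
    import java.time.temporal.ChronoField;
    import java.time.temporal.TemporalAccessor;

    import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
    import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;

    /**
     * Hypothetical year stream evaluator, sketched after the classes above.
     */
    public class TemporalEvaluatorYear extends TemporalEvaluator {

      public static final String FUNCTION_NAME = "year";   // assumed name, not defined by this patch

      public TemporalEvaluatorYear(StreamExpression expression, StreamFactory factory) throws IOException {
        super(expression, factory);
      }

      @Override
      public String getFunction() {
        return FUNCTION_NAME;
      }

      @Override
      public Object evaluateDate(TemporalAccessor aDate) {
        return aDate.get(ChronoField.YEAR);   // extract the calendar year from the cached date
      }
    }
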
+ */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a month stream evaluator + */ +public class TemporalEvaluatorMonth extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "month"; + + public TemporalEvaluatorMonth(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(ChronoField.MONTH_OF_YEAR); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorQuarter.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorQuarter.java new file mode 100644 index 000000000000..014431153fc6 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorQuarter.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.IsoFields; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a quarter stream evaluator + */ +public class TemporalEvaluatorQuarter extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "quarter"; + + public TemporalEvaluatorQuarter(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(IsoFields.QUARTER_OF_YEAR); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorSecond.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorSecond.java new file mode 100644 index 000000000000..f5b71fcb07da --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorSecond.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a second stream evaluator + */ +public class TemporalEvaluatorSecond extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "second"; + + public TemporalEvaluatorSecond(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(ChronoField.SECOND_OF_MINUTE); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorWeek.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorWeek.java new file mode 100644 index 000000000000..1a2974ad57a1 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorWeek.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.IsoFields; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +/** + * Provides a week stream evaluator + */ +public class TemporalEvaluatorWeek extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "week"; + + public TemporalEvaluatorWeek(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(IsoFields.WEEK_OF_WEEK_BASED_YEAR); + } + +} diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/package-info.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorYear.java similarity index 51% rename from lucene/grouping/src/java/org/apache/lucene/search/grouping/term/package-info.java rename to solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorYear.java index 27320118d7c1..0b8d69c9b9b1 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/package-info.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TemporalEvaluatorYear.java @@ -15,7 +15,34 @@ * limitations under the License. */ +package org.apache.solr.client.solrj.io.eval; + +import java.io.IOException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + /** - * Support for grouping by indexed terms via {@link org.apache.lucene.index.DocValues}. + * Provides a year stream evaluator */ -package org.apache.lucene.search.grouping.term; +public class TemporalEvaluatorYear extends TemporalEvaluator { + + public static final String FUNCTION_NAME = "year"; + + public TemporalEvaluatorYear(StreamExpression expression, StreamFactory factory) throws IOException { + super(expression, factory); + } + + @Override + public String getFunction() { + return FUNCTION_NAME; + } + + @Override + public Object evaluateDate(TemporalAccessor aDate) { + return aDate.get(ChronoField.YEAR); + } + +} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CalculatorStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CalculatorStream.java new file mode 100644 index 000000000000..49a0809a0450 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CalculatorStream.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +public class CalculatorStream extends TupleStream implements Expressible { + + private static final long serialVersionUID = 1; + private boolean finished; + + + + public CalculatorStream() throws IOException { + } + + public CalculatorStream(StreamExpression expression, StreamFactory factory) throws IOException { + } + + + @Override + public StreamExpression toExpression(StreamFactory factory) throws IOException{ + return toExpression(factory, true); + } + + private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException { + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + return new StreamExplanation(getStreamNodeId().toString()) + .withFunctionName(factory.getFunctionName(this.getClass())) + .withImplementingClass(this.getClass().getName()) + .withExpressionType(ExpressionType.STREAM_DECORATOR) + .withExpression(toExpression(factory, false).toString()); + } + + public void setStreamContext(StreamContext context) { + } + + public List children() { + List l = new ArrayList(); + return l; + } + + public void open() throws IOException { + + } + + public void close() throws IOException { + } + + public Tuple read() throws IOException { + + if(finished) { + HashMap m = new HashMap(); + m.put("EOF", true); + Tuple tuple = new Tuple(m); + return tuple; + } else { + HashMap m = new HashMap(); + Tuple tuple = new Tuple(m); + finished = true; + return tuple; + } + } + + /** Return the stream sort - ie, the order in which records are returned */ + public StreamComparator getStreamSort(){ + return null; + } + + public int getCost() { + return 0; + } + + +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CellStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CellStream.java new file mode 100644 index 000000000000..fd33737c29d7 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CellStream.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +public class CellStream extends TupleStream implements Expressible { + + private static final long serialVersionUID = 1; + private TupleStream stream; + private String name; + private Tuple tuple; + private Tuple EOFTuple; + + public CellStream(String name, TupleStream stream) throws IOException { + init(name, stream); + } + + public CellStream(StreamExpression expression, StreamFactory factory) throws IOException { + String name = factory.getValueOperand(expression, 0); + List streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class); + + if(streamExpressions.size() != 1){ + throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting 1 stream but found %d",expression, streamExpressions.size())); + } + + TupleStream tupleStream = factory.constructStream(streamExpressions.get(0)); + init(name, tupleStream); + } + + public String getName() { + return this.name; + } + + private void init(String name, TupleStream tupleStream) { + this.name = name; + this.stream = tupleStream; + } + + @Override + public StreamExpression toExpression(StreamFactory factory) throws IOException{ + return toExpression(factory, true); + } + + private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException { + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + expression.addParameter(name); + if(includeStreams) { + expression.addParameter(((Expressible)stream).toExpression(factory)); + } + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString()); + explanation.setFunctionName(factory.getFunctionName(this.getClass())); + explanation.setImplementingClass(this.getClass().getName()); + explanation.setExpressionType(ExpressionType.STREAM_DECORATOR); + explanation.setExpression(toExpression(factory, false).toString()); + explanation.addChild(stream.toExplanation(factory)); + + return explanation; + } + + public void setStreamContext(StreamContext context) { + this.stream.setStreamContext(context); + } + + public List children() { + List l = new ArrayList(); + l.add(stream); + + return l; + } + + public Tuple read() throws IOException { + if(tuple.EOF) 
{ + return tuple; + } else { + Tuple t = tuple; + tuple = EOFTuple; + return t; + } + } + + public void close() throws IOException { + } + + public void open() throws IOException { + try { + stream.open(); + List list = new ArrayList(); + while(true) { + Tuple tuple = stream.read(); + if(tuple.EOF) { + EOFTuple = tuple; + break; + } else { + list.add(tuple); + } + } + + Map map = new HashMap(); + map.put(name, list); + tuple = new Tuple(map); + } finally { + stream.close(); + } + } + + /** Return the stream sort - ie, the order in which records are returned */ + public StreamComparator getStreamSort(){ + return null; + } + + public int getCost() { + return 0; + } + + +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java index 7161dc402975..6d1764ade092 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java @@ -26,8 +26,6 @@ import java.util.Locale; import java.util.Map; import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; @@ -35,7 +33,6 @@ import java.util.stream.Collectors; import org.apache.solr.client.solrj.impl.CloudSolrClient; -import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder; import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.comp.ComparatorOrder; import org.apache.solr.client.solrj.io.comp.FieldComparator; @@ -52,9 +49,7 @@ import org.apache.solr.common.cloud.Aliases; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocCollection; -import org.apache.solr.common.cloud.Replica; import org.apache.solr.common.cloud.Slice; -import org.apache.solr.common.cloud.ZkCoreNodeProps; import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.MapSolrParams; import org.apache.solr.common.params.ModifiableSolrParams; @@ -178,9 +173,11 @@ public CloudSolrStream(StreamExpression expression, StreamFactory factory) throw else if(zkHostExpression.getParameter() instanceof StreamExpressionValue){ zkHost = ((StreamExpressionValue)zkHostExpression.getParameter()).getValue(); } + /* if(null == zkHost){ throw new IOException(String.format(Locale.ROOT,"invalid expression %s - zkHost not found for collection '%s'",expression,collectionName)); } + */ // We've got all the required items init(collectionName, zkHost, mParams); @@ -299,14 +296,6 @@ public void open() throws IOException { this.tuples = new TreeSet(); this.solrStreams = new ArrayList(); this.eofTuples = Collections.synchronizedMap(new HashMap()); - if (this.streamContext != null && this.streamContext.getSolrClientCache() != null) { - this.cloudSolrClient = this.streamContext.getSolrClientCache().getCloudSolrClient(zkHost); - } else { - this.cloudSolrClient = new Builder() - .withZkHost(zkHost) - .build(); - this.cloudSolrClient.connect(); - } constructStreams(); openStreams(); } @@ -400,29 +389,15 @@ public static Collection getSlices(String collectionName, ZkStateReader z protected void constructStreams() throws IOException { try { - ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); - ClusterState clusterState = zkStateReader.getClusterState(); - Collection slices = CloudSolrStream.getSlices(this.collection, 
zkStateReader, true); + List shardUrls = getShards(this.zkHost, this.collection, this.streamContext); ModifiableSolrParams mParams = new ModifiableSolrParams(params); mParams = adjustParams(mParams); mParams.set(DISTRIB, "false"); // We are the aggregator. - Set liveNodes = clusterState.getLiveNodes(); - for(Slice slice : slices) { - Collection replicas = slice.getReplicas(); - List shuffler = new ArrayList<>(); - for(Replica replica : replicas) { - if(replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName())) - shuffler.add(replica); - } - - Collections.shuffle(shuffler, new Random()); - Replica rep = shuffler.get(0); - ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep); - String url = zkProps.getCoreUrl(); - SolrStream solrStream = new SolrStream(url, mParams); + for(String shardUrl : shardUrls) { + SolrStream solrStream = new SolrStream(shardUrl, mParams); if(streamContext != null) { solrStream.setStreamContext(streamContext); } @@ -468,12 +443,6 @@ public void close() throws IOException { solrStream.close(); } } - - if ((this.streamContext == null || this.streamContext.getSolrClientCache() == null) && - cloudSolrClient != null) { - - cloudSolrClient.close(); - } } /** Return the stream sort - ie, the order in which records are returned */ diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/EchoStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/EchoStream.java new file mode 100644 index 000000000000..2dd95fe5c421 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/EchoStream.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +public class EchoStream extends TupleStream implements Expressible { + + private static final long serialVersionUID = 1; + private boolean finished; + private String echo; + + public EchoStream(String echo) throws IOException { + this.echo = stripQuotes(echo); + } + + public EchoStream(StreamExpression expression, StreamFactory factory) throws IOException { + this.echo = stripQuotes(factory.getValueOperand(expression, 0)); + this.echo = echo.replace("\\\"", "\""); + } + + private String stripQuotes(String s){ + if(s.startsWith("\"")) { + return s.substring(1, s.length()-1); + } else { + return s; + } + } + + @Override + public StreamExpression toExpression(StreamFactory factory) throws IOException{ + return toExpression(factory, true); + } + + private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException { + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + expression.addParameter("\""+echo.replace("\"", "\\\"")+"\""); + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + return new StreamExplanation(getStreamNodeId().toString()) + .withFunctionName(factory.getFunctionName(this.getClass())) + .withImplementingClass(this.getClass().getName()) + .withExpressionType(ExpressionType.STREAM_DECORATOR) + .withExpression(toExpression(factory, false).toString()); + } + + public void setStreamContext(StreamContext context) { + } + + public List children() { + List l = new ArrayList(); + return l; + } + + public void open() throws IOException { + + } + + public void close() throws IOException { + } + + public Tuple read() throws IOException { + + if(finished) { + HashMap m = new HashMap(); + m.put("EOF", true); + Tuple tuple = new Tuple(m); + return tuple; + } else { + HashMap m = new HashMap(); + m.put("echo", echo); + Tuple tuple = new Tuple(m); + finished = true; + return tuple; + } + } + + /** Return the stream sort - ie, the order in which records are returned */ + public StreamComparator getStreamSort(){ + return null; + } + + public int getCost() { + return 0; + } + + +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/EvalStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/EvalStream.java new file mode 100644 index 000000000000..9fac56f670b2 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/EvalStream.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class EvalStream extends TupleStream implements Expressible { + + private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + private TupleStream stream; + private TupleStream evalStream; + + private StreamFactory streamFactory; + private StreamContext streamContext; + + public EvalStream(StreamExpression expression, StreamFactory factory) throws IOException { + // grab all parameters out + List streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class); + + if(1 != streamExpressions.size()){ + throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting a single stream but found %d",expression, streamExpressions.size())); + } + + TupleStream stream = factory.constructStream(streamExpressions.get(0)); + init(stream, factory); + } + + private void init(TupleStream tupleStream, StreamFactory factory) throws IOException{ + this.stream = tupleStream; + this.streamFactory = factory; + } + + @Override + public StreamExpression toExpression(StreamFactory factory) throws IOException { + return toExpression(factory, true); + } + + private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException { + + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + + // stream + if(includeStreams) { + if (stream instanceof Expressible) { + expression.addParameter(((Expressible) stream).toExpression(factory)); + } else { + throw new IOException("The EvalStream contains a non-expressible TupleStream - it cannot be converted to an expression"); + } + } + + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + return new StreamExplanation(getStreamNodeId().toString()) + .withChildren(new Explanation[]{ + stream.toExplanation(factory) + }) + .withFunctionName(factory.getFunctionName(this.getClass())) + .withImplementingClass(this.getClass().getName()) + .withExpressionType(ExpressionType.STREAM_DECORATOR) + .withExpression(toExpression(factory, false).toString()); + 
} + + public void setStreamContext(StreamContext streamContext) { + this.streamContext = streamContext; + this.stream.setStreamContext(streamContext); + } + + public List children() { + List l = new ArrayList(); + l.add(stream); + return l; + } + + public void open() throws IOException { + try { + stream.open(); + Tuple tuple = stream.read(); + String expr = tuple.getString("expr_s"); + + if(expr == null) { + throw new IOException("expr_s cannot be empty for the EvalStream"); + } + + evalStream = streamFactory.constructStream(expr); + evalStream.setStreamContext(streamContext); + evalStream.open(); + } finally { + stream.close(); + } + } + + public void close() throws IOException { + evalStream.close(); + } + + public Tuple read() throws IOException { + return evalStream.read(); + } + + public StreamComparator getStreamSort(){ + return stream.getStreamSort(); + } + + public int getCost() { + return 0; + } +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/GetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/GetStream.java new file mode 100644 index 000000000000..5a89f0ff2dae --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/GetStream.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +public class GetStream extends TupleStream implements Expressible { + + private static final long serialVersionUID = 1; + + private StreamContext streamContext; + private String name; + private Iterator tupleIterator; + + public GetStream(String name) throws IOException { + init(name); + } + + public GetStream(StreamExpression expression, StreamFactory factory) throws IOException { + String name = factory.getValueOperand(expression, 0); + init(name); + } + + private void init(String name) { + this.name = name; + } + + @Override + public StreamExpression toExpression(StreamFactory factory) throws IOException{ + return toExpression(factory, true); + } + + private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException { + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + expression.addParameter(name); + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString()); + explanation.setFunctionName(factory.getFunctionName(this.getClass())); + explanation.setImplementingClass(this.getClass().getName()); + explanation.setExpressionType(ExpressionType.STREAM_SOURCE); + explanation.setExpression(toExpression(factory, false).toString()); + return explanation; + } + + public void setStreamContext(StreamContext context) { + this.streamContext = context; + } + + public List children() { + List l = new ArrayList(); + return l; + } + + public Tuple read() throws IOException { + Map map = new HashMap(); + if(tupleIterator.hasNext()) { + Tuple t = tupleIterator.next(); + map.putAll(t.fields); + return new Tuple(map); + } else { + map.put("EOF", true); + return new Tuple(map); + } + } + + public void close() throws IOException { + } + + public void open() throws IOException { + Map> lets = streamContext.getLets(); + List tuples = lets.get(name); + tupleIterator = tuples.iterator(); + } + + /** Return the stream sort - ie, the order in which records are returned */ + public StreamComparator getStreamSort(){ + return null; + } + + public int getCost() { + return 0; + } +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/HavingStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/HavingStream.java index 35e8952ed6f5..2f74bc5fd57c 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/HavingStream.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/HavingStream.java @@ -43,6 +43,7 @@ public class HavingStream extends TupleStream implements Expressible { private TupleStream stream; private BooleanEvaluator evaluator; + private 
StreamContext streamContext; + private transient Tuple currentGroupHead; @@ -128,6 +129,7 @@ public Explanation toExplanation(StreamFactory factory) throws IOException { } public void setStreamContext(StreamContext context) { + this.streamContext = context; this.stream.setStreamContext(context); } @@ -152,6 +154,7 @@ public Tuple read() throws IOException { return tuple; } + streamContext.getTupleContext().clear(); if(evaluator.evaluate(tuple)){ return tuple; } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java new file mode 100644 index 000000000000..3a17211c5ef1 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +public class LetStream extends TupleStream implements Expressible { + + private static final long serialVersionUID = 1; + private TupleStream stream; + private List cellStreams; + private StreamContext streamContext; + + public LetStream(TupleStream stream, List cellStreams) throws IOException { + init(stream, cellStreams); + } + + public LetStream(StreamExpression expression, StreamFactory factory) throws IOException { + List streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class); + + if(streamExpressions.size() < 2){ + throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting at least 2 streams but found %d",expression, streamExpressions.size())); + } + + TupleStream stream = null; + List cellStreams = new ArrayList(); + + for(StreamExpression streamExpression : streamExpressions) { + TupleStream s = factory.constructStream(streamExpression); + if(s instanceof CellStream) { + cellStreams.add((CellStream)s); + } else { + if(stream == null) { + stream = s; + } else { + throw new IOException("Found more than one stream that was not a CellStream"); + } + } + } + + init(stream, cellStreams); + } + + private void init(TupleStream
_stream, List _cellStreams) { + this.stream = _stream; + this.cellStreams = _cellStreams; + } + + @Override + public StreamExpression toExpression(StreamFactory factory) throws IOException{ + return toExpression(factory, true); + } + + private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException { + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + expression.addParameter(((Expressible) stream).toExpression(factory)); + for(CellStream cellStream : cellStreams) { + expression.addParameter(((Expressible)cellStream).toExpression(factory)); + } + + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString()); + explanation.setFunctionName(factory.getFunctionName(this.getClass())); + explanation.setImplementingClass(this.getClass().getName()); + explanation.setExpressionType(ExpressionType.STREAM_DECORATOR); + explanation.setExpression(toExpression(factory, false).toString()); + explanation.addChild(stream.toExplanation(factory)); + + return explanation; + } + + public void setStreamContext(StreamContext context) { + this.streamContext = context; + this.stream.setStreamContext(context); + } + + public List children() { + List l = new ArrayList(); + l.add(stream); + + return l; + } + + public Tuple read() throws IOException { + return stream.read(); + } + + public void close() throws IOException { + stream.close(); + } + + public void open() throws IOException { + Map> lets = streamContext.getLets(); + for(CellStream cellStream : cellStreams) { + try { + cellStream.setStreamContext(streamContext); + cellStream.open(); + Tuple tup = cellStream.read(); + String name = cellStream.getName(); + List tuples = (List)tup.get(name); + lets.put(name, tuples); + } finally { + cellStream.close(); + } + } + stream.open(); + } + + /** Return the stream sort - ie, the order in which records are returned */ + public StreamComparator getStreamSort(){ + return null; + } + + public int getCost() { + return 0; + } + + +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ListStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ListStream.java new file mode 100644 index 000000000000..e295a5815979 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ListStream.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +public class ListStream extends TupleStream implements Expressible { + + private static final long serialVersionUID = 1; + private TupleStream[] streams; + private TupleStream currentStream; + private int streamIndex; + + public ListStream(TupleStream... streams) throws IOException { + init(streams); + } + + public ListStream(StreamExpression expression, StreamFactory factory) throws IOException { + List streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class); + TupleStream[] streams = new TupleStream[streamExpressions.size()]; + for(int idx = 0; idx < streamExpressions.size(); ++idx){ + streams[idx] = factory.constructStream(streamExpressions.get(idx)); + } + + init(streams); + } + + private void init(TupleStream ... tupleStreams) { + this.streams = tupleStreams; + } + + @Override + public StreamExpression toExpression(StreamFactory factory) throws IOException{ + return toExpression(factory, true); + } + + private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException { + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + if(includeStreams) { + for(TupleStream stream : streams) { + expression.addParameter(((Expressible)stream).toExpression(factory)); + } + } + return expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString()); + explanation.setFunctionName(factory.getFunctionName(this.getClass())); + explanation.setImplementingClass(this.getClass().getName()); + explanation.setExpressionType(ExpressionType.STREAM_DECORATOR); + explanation.setExpression(toExpression(factory, false).toString()); + for(TupleStream stream : streams) { + explanation.addChild(stream.toExplanation(factory)); + } + + return explanation; + } + + public void setStreamContext(StreamContext context) { + for(TupleStream stream : streams) { + stream.setStreamContext(context); + } + } + + public List children() { + List l = new ArrayList(); + for(TupleStream stream : streams) { + l.add(stream); + } + return l; + } + + public Tuple read() throws IOException { + while(true) { + if (currentStream == null) { + if (streamIndex < streams.length) { + currentStream = streams[streamIndex]; + currentStream.open(); + } else { + HashMap map = new HashMap(); + map.put("EOF", true); + return new Tuple(map); + } + } + + Tuple tuple = currentStream.read(); + if (tuple.EOF) { + currentStream.close(); + currentStream = null; + ++streamIndex; + } else { + return tuple; + } + } + } + + public void close() throws IOException { + } + + public void open() throws IOException { + + + } + + /** Return the stream sort - ie, the order in which records are returned 
*/ + public StreamComparator getStreamSort(){ + return null; + } + + public int getCost() { + return 0; + } + + +} \ No newline at end of file diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java index 87e13549d05c..58ba248d44b9 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java @@ -18,14 +18,10 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Random; -import java.util.Set; import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.comp.FieldComparator; @@ -38,11 +34,6 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter; import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue; import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; -import org.apache.solr.common.cloud.ClusterState; -import org.apache.solr.common.cloud.Replica; -import org.apache.solr.common.cloud.Slice; -import org.apache.solr.common.cloud.ZkCoreNodeProps; -import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.ModifiableSolrParams; import static org.apache.solr.common.params.CommonParams.DISTRIB; @@ -263,27 +254,7 @@ protected void constructStreams() throws IOException { try { Object pushStream = ((Expressible) tupleStream).toExpression(streamFactory); - ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); - - Collection slices = CloudSolrStream.getSlices(this.collection, zkStateReader, true); - - ClusterState clusterState = zkStateReader.getClusterState(); - Set liveNodes = clusterState.getLiveNodes(); - - List shuffler = new ArrayList<>(); - for(Slice slice : slices) { - Collection replicas = slice.getReplicas(); - for (Replica replica : replicas) { - if(replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName())) - shuffler.add(replica); - } - } - - if(workers > shuffler.size()) { - throw new IOException("Number of workers exceeds nodes in the worker collection"); - } - - Collections.shuffle(shuffler, new Random()); + List shardUrls = getShards(this.zkHost, this.collection, this.streamContext); for(int w=0; w selectedFields; private Map selectedEvaluators; private List operations; @@ -124,8 +126,17 @@ public SelectStream(StreamExpression expression,StreamFactory factory) throws IO selectedEvaluators.put(factory.constructEvaluator(asValueExpression), asName); handled = true; } - } - catch(Throwable e){ + } catch(Throwable e) { + Throwable t = e; + while(true) { + if(t instanceof EvaluatorException) { + throw new IOException(t); + } + t = t.getCause(); + if(t == null) { + break; + } + } // it was not handled, so treat as a non-evaluator } } @@ -213,6 +224,7 @@ public Explanation toExplanation(StreamFactory factory) throws IOException { } public void setStreamContext(StreamContext context) { + this.streamContext = context; this.stream.setStreamContext(context); Set evaluators = selectedEvaluators.keySet(); @@ -245,6 +257,14 @@ public Tuple read() throws IOException { // create a copy with the limited set of fields Tuple workingToReturn = new Tuple(new HashMap<>()); Tuple workingForEvaluators = new Tuple(new HashMap<>()); + + //Clear the 
TupleContext before running the evaluators. + //The TupleContext allows evaluators to cache values within the scope of a single tuple. + //For example a LocalDateTime could be parsed by one evaluator and used by other evaluators within the scope of the tuple. + //This avoids the need to create multiple LocalDateTime instances for the same tuple to satisfy a select expression. + + streamContext.getTupleContext().clear(); + for(Object fieldName : original.fields.keySet()){ workingForEvaluators.put(fieldName, original.get(fieldName)); if(selectedFields.containsKey(fieldName)){ diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StreamContext.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StreamContext.java index 6cbf09053fe6..5dcc7b32e862 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StreamContext.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StreamContext.java @@ -19,8 +19,10 @@ import java.io.Serializable; import java.util.Map; import java.util.HashMap; +import java.util.List; import org.apache.solr.client.solrj.io.ModelCache; +import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.SolrClientCache; import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; @@ -36,12 +38,18 @@ public class StreamContext implements Serializable{ private Map entries = new HashMap(); + private Map tupleContext = new HashMap(); + private Map> lets = new HashMap(); public int workerID; public int numWorkers; private SolrClientCache clientCache; private ModelCache modelCache; private StreamFactory streamFactory; + public Map> getLets(){ + return lets; + } + public Object get(Object key) { return entries.get(key); } @@ -50,6 +58,10 @@ public void put(Object key, Object value) { this.entries.put(key, value); } + public boolean containsKey(Object key) { + return entries.containsKey(key); + } + public Map getEntries() { return this.entries; } @@ -74,6 +86,10 @@ public void setStreamFactory(StreamFactory streamFactory) { this.streamFactory = streamFactory; } + public Map getTupleContext() { + return tupleContext; + } + public StreamFactory getStreamFactory() { return this.streamFactory; } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TimeSeriesStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TimeSeriesStream.java new file mode 100644 index 000000000000..cdab2d0cbd76 --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TimeSeriesStream.java @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.client.solrj.io.stream; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.stream.Collectors; + +import org.apache.solr.client.solrj.impl.CloudSolrClient; +import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder; +import org.apache.solr.client.solrj.io.SolrClientCache; +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; +import org.apache.solr.client.solrj.io.stream.expr.Expressible; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; +import org.apache.solr.client.solrj.io.stream.metrics.Metric; +import org.apache.solr.client.solrj.request.QueryRequest; +import org.apache.solr.common.params.ModifiableSolrParams; +import org.apache.solr.common.params.SolrParams; +import org.apache.solr.common.util.NamedList; + +public class TimeSeriesStream extends TupleStream implements Expressible { + + private static final long serialVersionUID = 1; + + private String start; + private String end; + private String gap; + private String field; + + private Metric[] metrics; + private List tuples = new ArrayList(); + private int index; + private String zkHost; + private SolrParams params; + private String collection; + protected transient SolrClientCache cache; + protected transient CloudSolrClient cloudSolrClient; + + public TimeSeriesStream(String zkHost, + String collection, + SolrParams params, + Metric[] metrics, + String field, + String start, + String end, + String gap) throws IOException { + init(collection, params, field, metrics, start, end, gap, zkHost); + } + + public TimeSeriesStream(StreamExpression expression, StreamFactory factory) throws IOException{ + // grab all parameters out + String collectionName = factory.getValueOperand(expression, 0); + List namedParams = factory.getNamedOperands(expression); + StreamExpressionNamedParameter startExpression = factory.getNamedOperand(expression, "start"); + StreamExpressionNamedParameter endExpression = factory.getNamedOperand(expression, "end"); + StreamExpressionNamedParameter fieldExpression = factory.getNamedOperand(expression, "field"); + StreamExpressionNamedParameter gapExpression = factory.getNamedOperand(expression, "gap"); + StreamExpressionNamedParameter zkHostExpression = factory.getNamedOperand(expression, "zkHost"); + List metricExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, Metric.class); + + String start = null; + if(startExpression != null) { + start = ((StreamExpressionValue)startExpression.getParameter()).getValue(); + } + + String end = null; + if(endExpression != null) { + end = ((StreamExpressionValue)endExpression.getParameter()).getValue(); + } + + String gap = null; + if(gapExpression != null) { + gap = ((StreamExpressionValue)gapExpression.getParameter()).getValue(); + } + + String field = null; + if(fieldExpression != null) { + field = ((StreamExpressionValue)fieldExpression.getParameter()).getValue(); + } + + // Collection Name + if(null == collectionName){ + throw new IOException(String.format(Locale.ROOT,"invalid expression %s - collectionName expected as first operand",expression)); + } + + // Named parameters - passed directly to solr as solrparams + if(0 == namedParams.size()){ + throw new IOException(String.format(Locale.ROOT,"invalid expression %s - at least one named parameter expected. eg. 'q=*:*'",expression)); + } + + // Construct the metrics + Metric[] metrics = new Metric[metricExpressions.size()]; + for(int idx = 0; idx < metricExpressions.size(); ++idx){ + metrics[idx] = factory.constructMetric(metricExpressions.get(idx)); + } + + if(0 == metrics.length){ + throw new IOException(String.format(Locale.ROOT,"invalid expression %s - at least one metric expected.",expression,collectionName)); + } + + // pull out known named params + ModifiableSolrParams params = new ModifiableSolrParams(); + for(StreamExpressionNamedParameter namedParam : namedParams){ + if(!namedParam.getName().equals("zkHost") && !namedParam.getName().equals("start") && !namedParam.getName().equals("end") && !namedParam.getName().equals("gap")){ + params.add(namedParam.getName(), namedParam.getParameter().toString().trim()); + } + } + + // zkHost, optional - if not provided then will look into factory list to get + String zkHost = null; + if(null == zkHostExpression){ + zkHost = factory.getCollectionZkHost(collectionName); + if(zkHost == null) { + zkHost = factory.getDefaultZkHost(); + } + } + else if(zkHostExpression.getParameter() instanceof StreamExpressionValue){ + zkHost = ((StreamExpressionValue)zkHostExpression.getParameter()).getValue(); + } + if(null == zkHost){ + throw new IOException(String.format(Locale.ROOT,"invalid expression %s - zkHost not found for collection '%s'",expression,collectionName)); + } + + // We've got all the required items + init(collectionName, params, field, metrics, start, end, gap , zkHost); + } + + public String getCollection() { + return this.collection; + } + + private void init(String collection, + SolrParams params, + String field, + Metric[] metrics, + String start, + String end, + String gap, + String zkHost) throws IOException { + this.zkHost = zkHost; + this.collection = collection; + this.start = start; + this.gap = gap; + this.metrics = metrics; + this.field = field; + this.params = params; + this.end = end; + } + + @Override + public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException { + // function name + StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass())); + // collection + expression.addParameter(collection); + + // parameters + ModifiableSolrParams tmpParams = new ModifiableSolrParams(params); + + for (Entry param : tmpParams.getMap().entrySet()) { + expression.addParameter(new StreamExpressionNamedParameter(param.getKey(), + String.join(",", param.getValue()))); + } + + // metrics + for(Metric metric : metrics){ + expression.addParameter(metric.toExpression(factory)); + } + + expression.addParameter(new StreamExpressionNamedParameter("start", start)); + expression.addParameter(new StreamExpressionNamedParameter("end", end)); + expression.addParameter(new StreamExpressionNamedParameter("gap", gap)); + expression.addParameter(new StreamExpressionNamedParameter("field", field)); + + // zkHost + expression.addParameter(new StreamExpressionNamedParameter("zkHost", zkHost)); + + return
expression; + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString()); + + explanation.setFunctionName(factory.getFunctionName(this.getClass())); + explanation.setImplementingClass(this.getClass().getName()); + explanation.setExpressionType(ExpressionType.STREAM_SOURCE); + explanation.setExpression(toExpression(factory).toString()); + + // child is a datastore so add it at this point + StreamExplanation child = new StreamExplanation(getStreamNodeId() + "-datastore"); + child.setFunctionName(String.format(Locale.ROOT, "solr (%s)", collection)); + // TODO: fix this so we know the # of workers - check with Joel about a Topic's ability to be in a + // parallel stream. + + child.setImplementingClass("Solr/Lucene"); + child.setExpressionType(ExpressionType.DATASTORE); + ModifiableSolrParams tmpParams = new ModifiableSolrParams(SolrParams.toMultiMap(params.toNamedList())); + + child.setExpression(tmpParams.getMap().entrySet().stream().map(e -> String.format(Locale.ROOT, "%s=%s", e.getKey(), e.getValue())).collect(Collectors.joining(","))); + + explanation.addChild(child); + + return explanation; + } + + public void setStreamContext(StreamContext context) { + cache = context.getSolrClientCache(); + } + + public List children() { + return new ArrayList(); + } + + public void open() throws IOException { + if(cache != null) { + cloudSolrClient = cache.getCloudSolrClient(zkHost); + } else { + cloudSolrClient = new Builder() + .withZkHost(zkHost) + .build(); + } + + String json = getJsonFacetString(field, metrics, start, end, gap); + + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(params); + paramsLoc.set("json.facet", json); + paramsLoc.set("rows", "0"); + + QueryRequest request = new QueryRequest(paramsLoc); + try { + NamedList response = cloudSolrClient.request(request, collection); + getTuples(response, field, metrics); + } catch (Exception e) { + throw new IOException(e); + } + } + + public void close() throws IOException { + if(cache == null) { + cloudSolrClient.close(); + } + } + + public Tuple read() throws IOException { + if(index < tuples.size()) { + Tuple tuple = tuples.get(index); + ++index; + return tuple; + } else { + Map fields = new HashMap(); + fields.put("EOF", true); + Tuple tuple = new Tuple(fields); + return tuple; + } + } + + private String getJsonFacetString(String field, Metric[] _metrics, String start, String end, String gap) { + StringBuilder buf = new StringBuilder(); + appendJson(buf, _metrics, field, start, end, gap); + return "{"+buf.toString()+"}"; + } + + + private void appendJson(StringBuilder buf, + Metric[] _metrics, + String field, + String start, + String end, + String gap) { + buf.append('"'); + buf.append("timeseries"); + buf.append('"'); + buf.append(":{"); + buf.append("\"type\":\"range\""); + buf.append(",\"field\":\""+field+"\""); + buf.append(",\"start\":\""+start+"\""); + buf.append(",\"end\":\""+end+"\""); + buf.append(",\"gap\":\""+gap+"\""); + + buf.append(",\"facet\":{"); + int metricCount = 0; + for(Metric metric : _metrics) { + String identifier = metric.getIdentifier(); + if(!identifier.startsWith("count(")) { + if(metricCount>0) { + buf.append(","); + } + buf.append("\"facet_" + metricCount + "\":\"" +identifier+"\""); + ++metricCount; + } + } + buf.append("}}"); + } + + private void getTuples(NamedList response, + String field, + Metric[] metrics) { + + Tuple tuple = new Tuple(new HashMap()); + NamedList facets = 
(NamedList)response.get("facets"); + fillTuples(tuples, tuple, facets, field, metrics); + } + + private void fillTuples(List tuples, + Tuple currentTuple, + NamedList facets, + String field, + Metric[] _metrics) { + + NamedList nl = (NamedList)facets.get("timeseries"); + if(nl == null) { + return; + } + + List allBuckets = (List)nl.get("buckets"); + for(int b=0; b { + + public ListAliases() { + super(CollectionAction.LISTALIASES); + } + + @Override + protected CollectionAdminResponse createResponse(SolrClient client) { + return new CollectionAdminResponse(); + } + + } + /** * Returns a SolrRequest to get a list of collections in the cluster */ diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java index 142710a04250..0d9867cc86c9 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java @@ -269,7 +269,7 @@ public Map getRoutes(DocRouter router, return null; } String leaderUrl = urls.get(0); - LBHttpSolrClient.Req request = (LBHttpSolrClient.Req) routes + LBHttpSolrClient.Req request = routes .get(leaderUrl); if (request == null) { UpdateRequest updateRequest = new UpdateRequest(); @@ -278,6 +278,7 @@ public Map getRoutes(DocRouter router, updateRequest.setParams(params); updateRequest.setPath(getPath()); updateRequest.setBasicAuthCredentials(getBasicAuthUser(), getBasicAuthPassword()); + updateRequest.setResponseParser(getResponseParser()); request = new LBHttpSolrClient.Req(updateRequest, urls); routes.put(leaderUrl, request); } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java index 82d4d6f06d95..6821075b3660 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java @@ -16,6 +16,7 @@ */ package org.apache.solr.client.solrj.response; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -60,6 +61,16 @@ public Map> getCollectionCoresStatus() return res; } + @SuppressWarnings("unchecked") + public Map getAliases() + { + NamedList response = getResponse(); + if (response.get("aliases") != null) { + return ((Map)response.get("aliases")); + } + return Collections.emptyMap(); + } + @SuppressWarnings("unchecked") public Map> getCollectionNodesStatus() { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java index b4ee55333cc9..4ee3fdd6a62f 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java @@ -15,6 +15,7 @@ * limitations under the License. 
*/ package org.apache.solr.client.solrj.response; + import org.apache.solr.common.util.NamedList; import java.util.ArrayList; @@ -41,7 +42,7 @@ public TermsResponse(NamedList> termsInfo) { if (val instanceof NamedList) { @SuppressWarnings("unchecked") NamedList termStats = (NamedList) val; - t = new Term(term, termStats.get("docFreq").longValue(), termStats.get("totalTermFreq").longValue()); + t = new Term(term, termStats.get("df").longValue(), termStats.get("ttf").longValue()); } else { t = new Term(term, ((Number) val).longValue()); } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java b/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java index c5595b189917..54986db5b0cb 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java @@ -159,6 +159,35 @@ public static String escapeQueryChars(String s) { return sb.toString(); } + /** + * Returns the value encoded properly so it can be appended after a
<pre>name=</pre>
    local-param. + */ + public static String encodeLocalParamVal(String val) { + int len = val.length(); + int i = 0; + if (len > 0 && val.charAt(0) != '$') { + for (;i=len) return val; + + // We need to enclose in quotes... but now we need to escape + StringBuilder sb = new StringBuilder(val.length() + 4); + sb.append('\''); + for (i=0; i target, String collectionName, Collection slices, boolean multiCollection) { for (Slice slice : slices) { diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java index 302ee62e434b..65bd81b80d4c 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java @@ -322,6 +322,10 @@ public static ClusterState load(Integer version, byte[] bytes, Set liveN return new ClusterState(version, liveNodes, Collections.emptyMap()); } Map stateMap = (Map) Utils.fromJSON(bytes); + return load(version, stateMap, liveNodes, znode); + } + + public static ClusterState load(Integer version, Map stateMap, Set liveNodes, String znode) { Map collections = new LinkedHashMap<>(stateMap.size()); for (Entry entry : stateMap.entrySet()) { String collectionName = entry.getKey(); @@ -332,7 +336,6 @@ public static ClusterState load(Integer version, byte[] bytes, Set liveN return new ClusterState( liveNodes, collections,version); } - public static Aliases load(byte[] bytes) { if (bytes == null || bytes.length == 0) { return new Aliases(); diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java index 51db039ea34e..d79fafa3efda 100644 --- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java +++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java @@ -68,6 +68,7 @@ enum CollectionAction { SYNCSHARD(true, LockLevel.SHARD), CREATEALIAS(true, LockLevel.COLLECTION), DELETEALIAS(true, LockLevel.COLLECTION), + LISTALIASES(false, LockLevel.NONE), SPLITSHARD(true, LockLevel.SHARD), DELETESHARD(true, LockLevel.SHARD), CREATESHARD(true, LockLevel.COLLECTION), diff --git a/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java b/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java index e884a5b0ebd2..dcf5e21170ac 100644 --- a/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java +++ b/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.Map; +import org.apache.solr.client.solrj.util.ClientUtils; import org.apache.solr.common.MapSerializable; import org.apache.solr.common.SolrException; import org.apache.solr.common.util.NamedList; @@ -497,7 +498,27 @@ public String toQueryString() { throw new AssertionError(e); } } - + + /** + * Generates a local-params string of the form
<pre>{! name=value name2=value2}</pre>
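   (for example, a SolrParams instance holding df=text and v=hello would produce {! df=text v=hello})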
    . + */ + public String toLocalParamsString() { + final StringBuilder sb = new StringBuilder(128); + sb.append("{!"); + //TODO perhaps look for 'type' and add here? but it doesn't matter. + for (final Iterator it = getParameterNamesIterator(); it.hasNext();) { + final String name = it.next(); + for (String val : getParams(name)) { + sb.append(' '); // do so even the first time; why not. + sb.append(name); // no escaping for name; it must follow "Java Identifier" rules. + sb.append('='); + sb.append(ClientUtils.encodeLocalParamVal(val)); + } + } + sb.append('}'); + return sb.toString(); + } + /** Like {@link #toQueryString()}, but only replacing enough chars so that * the URL may be unambiguously pasted back into a browser. * This method can be used to properly log query parameters without diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java b/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java index 1f16415bf0ec..2c8ebe989f74 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java @@ -79,7 +79,7 @@ public void setUp() throws Exception { System.setProperty( "solr.core1.data.dir", this.dataDir2.getCanonicalPath() ); setupCoreContainer(); - log.info("CORES=" + cores + " : " + cores.getCoreNames()); + log.info("CORES=" + cores + " : " + cores.getLoadedCoreNames()); } diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java index d260b02da991..0a83138a8e62 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java @@ -117,9 +117,9 @@ private LBHttpSolrClient getMockLbHttpSolrClient(Map responses return mockLbclient; } - private CloudSolrClient.ClusterStateProvider getStateProvider(Set livenodes, + private ClusterStateProvider getStateProvider(Set livenodes, Map colls) { - return new CloudSolrClient.ClusterStateProvider() { + return new ClusterStateProvider() { @Override public ClusterState.CollectionRef getState(String collection) { return colls.get(collection); @@ -130,11 +130,6 @@ public Set liveNodes() { return livenodes; } - @Override - public Map getClusterProperties() { - return Collections.EMPTY_MAP; - } - @Override public String getAlias(String collection) { return collection; @@ -152,6 +147,16 @@ public void connect() { } public void close() throws IOException { } + + @Override + public Object getClusterProperty(String propertyName) { + return null; + } + + @Override + public Object getClusterProperty(String propertyName, String def) { + return def; + } }; } diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java index d22b37c24be1..c91cb67a2fae 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java @@ -68,6 +68,7 @@ import org.apache.solr.handler.admin.CollectionsHandler; import org.apache.solr.handler.admin.ConfigSetsHandler; import org.apache.solr.handler.admin.CoreAdminHandler; +import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; @@ -90,6 +91,8 
@@ public class CloudSolrClientTest extends SolrCloudTestCase { private static final int TIMEOUT = 30; private static final int NODE_COUNT = 3; + private static CloudSolrClient httpBasedCloudSolrClient = null; + @BeforeClass public static void setupCluster() throws Exception { configureCluster(NODE_COUNT) @@ -99,8 +102,21 @@ public static void setupCluster() throws Exception { CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient()); AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, TIMEOUT); + + httpBasedCloudSolrClient = new CloudSolrClient.Builder().withSolrUrl( + cluster.getJettySolrRunner(0).getBaseUrl().toString()).build(); } + @AfterClass + public static void afterClass() { + if (httpBasedCloudSolrClient != null) { + try { + httpBasedCloudSolrClient.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @Before @@ -110,6 +126,13 @@ public void cleanIndex() throws Exception { .commit(cluster.getSolrClient(), COLLECTION); } + /** + * Randomly return the cluster's ZK based CSC, or HttpClusterProvider based CSC. + */ + private CloudSolrClient getRandomClient() { + return random().nextBoolean()? cluster.getSolrClient(): httpBasedCloudSolrClient; + } + @Test public void testParallelUpdateQTime() throws Exception { UpdateRequest req = new UpdateRequest(); @@ -118,7 +141,7 @@ public void testParallelUpdateQTime() throws Exception { doc.addField("id", String.valueOf(TestUtil.nextInt(random(), 1000, 1100))); req.add(doc); } - UpdateResponse response = req.process(cluster.getSolrClient(), COLLECTION); + UpdateResponse response = req.process(getRandomClient(), COLLECTION); // See SOLR-6547, we just need to ensure that no exception is thrown here assertTrue(response.getQTime() >= 0); } @@ -143,33 +166,48 @@ public void testOverwriteOption() throws Exception { .add(new SolrInputDocument(id, "1", "a_t", "hello2"), false) .commit(cluster.getSolrClient(), "overwrite"); - resp = cluster.getSolrClient().query("overwrite", new SolrQuery("*:*")); + resp = getRandomClient().query("overwrite", new SolrQuery("*:*")); assertEquals("There should be 3 documents because there should be two id=1 docs due to overwrite=false", 3, resp.getResults().getNumFound()); } + @Test + public void testAliasHandling() throws Exception { + CloudSolrClient client = getRandomClient(); + SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc"); + client.add(COLLECTION, doc); + client.commit(COLLECTION); + + CollectionAdminRequest.createAlias("testalias", COLLECTION).process(cluster.getSolrClient()); + + // ensure that the alias has been registered + assertEquals(COLLECTION, + new CollectionAdminRequest.ListAliases().process(cluster.getSolrClient()).getAliases().get("testalias")); + + assertEquals(1, client.query(COLLECTION, params("q", "*:*")).getResults().getNumFound()); + assertEquals(1, client.query("testalias", params("q", "*:*")).getResults().getNumFound()); + } + @Test public void testHandlingOfStaleAlias() throws Exception { - try (CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress())) { - client.setDefaultCollection("misconfigured-alias"); + CloudSolrClient client = getRandomClient(); - CollectionAdminRequest.createCollection("nemesis", "conf", 2, 1).process(client); - CollectionAdminRequest.createAlias("misconfigured-alias", 
"nemesis").process(client); - CollectionAdminRequest.deleteCollection("nemesis").process(client); + CollectionAdminRequest.createCollection("nemesis", "conf", 2, 1).process(client); + CollectionAdminRequest.createAlias("misconfigured-alias", "nemesis").process(client); + CollectionAdminRequest.deleteCollection("nemesis").process(client); - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); - SolrInputDocument doc = new SolrInputDocument(); - doc.addField(id, Integer.toString(1)); - docs.add(doc); + SolrInputDocument doc = new SolrInputDocument(); + doc.addField(id, Integer.toString(1)); + docs.add(doc); - try { - client.add(docs); - fail("Alias points to non-existing collection, add should fail"); - } catch (SolrException e) { - assertEquals(SolrException.ErrorCode.BAD_REQUEST.code, e.code()); - assertTrue("Unexpected exception", e.getMessage().contains("Collection not found")); - } + try { + client.add("misconfigured-alias", docs); + fail("Alias points to non-existing collection, add should fail"); + } catch (SolrException e) { + assertEquals(SolrException.ErrorCode.BAD_REQUEST.code, e.code()); + assertTrue("Unexpected exception", e.getMessage().contains("Collection not found")); } } @@ -182,8 +220,8 @@ public void testRouting() throws Exception { .setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true); // Test single threaded routed updates for UpdateRequest - NamedList response = cluster.getSolrClient().request(request, COLLECTION); - if (cluster.getSolrClient().isDirectUpdatesToLeadersOnly()) { + NamedList response = getRandomClient().request(request, COLLECTION); + if (getRandomClient().isDirectUpdatesToLeadersOnly()) { checkSingleServer(response); } CloudSolrClient.RouteResponse rr = (CloudSolrClient.RouteResponse) response; @@ -214,11 +252,11 @@ public void testRouting() throws Exception { .deleteById("0") .deleteById("2") .commit(cluster.getSolrClient(), COLLECTION); - if (cluster.getSolrClient().isDirectUpdatesToLeadersOnly()) { + if (getRandomClient().isDirectUpdatesToLeadersOnly()) { checkSingleServer(uResponse.getResponse()); } - QueryResponse qResponse = cluster.getSolrClient().query(COLLECTION, new SolrQuery("*:*")); + QueryResponse qResponse = getRandomClient().query(COLLECTION, new SolrQuery("*:*")); SolrDocumentList docs = qResponse.getResults(); assertEquals(0, docs.getNumFound()); @@ -307,7 +345,7 @@ public void testRouting() throws Exception { ModifiableSolrParams solrParams = new ModifiableSolrParams(); solrParams.set(CommonParams.Q, "*:*"); solrParams.set(ShardParams._ROUTE_, sameShardRoutes.get(random().nextInt(sameShardRoutes.size()))); - log.info("output: {}", cluster.getSolrClient().query(COLLECTION, solrParams)); + log.info("output: {}", getRandomClient().query(COLLECTION, solrParams)); } // Request counts increase from expected nodes should aggregate to 1000, while there should be @@ -362,10 +400,10 @@ public void preferLocalShardsTest() throws Exception { .add(id, "0", "a_t", "hello1") .add(id, "2", "a_t", "hello2") .add(id, "3", "a_t", "hello2") - .commit(cluster.getSolrClient(), collectionName); + .commit(getRandomClient(), collectionName); // Run the actual test for 'preferLocalShards' - queryWithPreferLocalShards(cluster.getSolrClient(), true, collectionName); + queryWithPreferLocalShards(getRandomClient(), true, collectionName); } private void queryWithPreferLocalShards(CloudSolrClient cloudClient, @@ -418,10 +456,10 @@ private void queryWithPreferLocalShards(CloudSolrClient cloudClient, private Long getNumRequests(String baseUrl, String 
collectionName) throws SolrServerException, IOException { - return getNumRequests(baseUrl, collectionName, "QUERY", "standard", false); + return getNumRequests(baseUrl, collectionName, "QUERY", "standard", null, false); } - private Long getNumRequests(String baseUrl, String collectionName, String category, String key, boolean returnNumErrors) throws + private Long getNumRequests(String baseUrl, String collectionName, String category, String key, String scope, boolean returnNumErrors) throws SolrServerException, IOException { NamedList resp; @@ -437,7 +475,21 @@ private Long getNumRequests(String baseUrl, String collectionName, String catego QueryRequest req = new QueryRequest(params); resp = client.request(req); } - return (Long) resp.findRecursive("solr-mbeans", category, key, "stats", returnNumErrors ? "errors" : "requests"); + String name; + if (returnNumErrors) { + name = category + "." + (scope != null ? scope : key) + ".errors"; + } else { + name = category + "." + (scope != null ? scope : key) + ".requests"; + } + Map map = (Map)resp.findRecursive("solr-mbeans", category, key, "stats"); + if (map == null) { + return null; + } + if (scope != null) { // admin handler uses a meter instead of counter here + return (Long)map.get(name + ".count"); + } else { + return (Long) map.get(name); + } } @Test @@ -458,7 +510,7 @@ public void testNonRetryableRequests() throws Exception { for (String adminPath : adminPathToMbean.keySet()) { long errorsBefore = 0; for (JettySolrRunner runner : cluster.getJettySolrRunners()) { - Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), true); + Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true); errorsBefore += numRequests; log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl()); } @@ -475,7 +527,7 @@ public void testNonRetryableRequests() throws Exception { } long errorsAfter = 0; for (JettySolrRunner runner : cluster.getJettySolrRunners()) { - Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), true); + Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true); errorsAfter += numRequests; log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl()); } @@ -644,7 +696,7 @@ public void testVersionsAreReturned() throws Exception { .add("id", "2", "a_t", "hello2"); updateRequest.setParam(UpdateParams.VERSIONS, Boolean.TRUE.toString()); - NamedList response = updateRequest.commit(cluster.getSolrClient(), COLLECTION).getResponse(); + NamedList response = updateRequest.commit(getRandomClient(), COLLECTION).getResponse(); Object addsObject = response.get("adds"); assertNotNull("There must be a adds parameter", addsObject); @@ -663,7 +715,7 @@ public void testVersionsAreReturned() throws Exception { assertTrue("Version for id 2 must be a long", object instanceof Long); versions.put("2", (Long) object); - QueryResponse resp = cluster.getSolrClient().query(COLLECTION, new SolrQuery("*:*")); + QueryResponse resp = getRandomClient().query(COLLECTION, new SolrQuery("*:*")); assertEquals("There should be one document because overwrite=true", 2, resp.getResults().getNumFound()); for (SolrDocument doc : resp.getResults()) { @@ -674,13 +726,38 @@ public void testVersionsAreReturned() throws Exception { // assert that "deletes" are returned 
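+    // As with the "adds" checked above, VERSIONS=true makes the delete response include a "deletes" NamedList with one entry per deleted id.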
UpdateRequest deleteRequest = new UpdateRequest().deleteById("1"); deleteRequest.setParam(UpdateParams.VERSIONS, Boolean.TRUE.toString()); - response = deleteRequest.commit(cluster.getSolrClient(), COLLECTION).getResponse(); + response = deleteRequest.commit(getRandomClient(), COLLECTION).getResponse(); Object deletesObject = response.get("deletes"); assertNotNull("There must be a deletes parameter", deletesObject); NamedList deletes = (NamedList) deletesObject; assertEquals("There must be 1 version", 1, deletes.size()); } + @Test + public void testInitializationWithSolrUrls() throws Exception { + CloudSolrClient client = httpBasedCloudSolrClient; + SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc"); + client.add(COLLECTION, doc); + client.commit(COLLECTION); + assertEquals(1, client.query(COLLECTION, params("q", "*:*")).getResults().getNumFound()); + } + + @Test + public void testCollectionDoesntExist() throws Exception { + CloudSolrClient client = getRandomClient(); + SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc"); + try { + client.add("boguscollectionname", doc); + fail(); + } catch (SolrException ex) { + if (ex.getMessage().equals("Collection not found: boguscollectionname")) { + // pass + } else { + throw ex; + } + } + } + private static void checkSingleServer(NamedList response) { final CloudSolrClient.RouteResponse rr = (CloudSolrClient.RouteResponse) response; final Map routes = rr.getRoutes(); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java index e55c83747d98..9fff33a083a5 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java @@ -27,6 +27,7 @@ import java.util.Locale; import org.apache.lucene.util.LuceneTestCase; +import org.apache.solr.client.solrj.io.SolrClientCache; import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.comp.ComparatorOrder; import org.apache.solr.client.solrj.io.comp.FieldComparator; @@ -205,6 +206,10 @@ public void testJDBCSolrMerge() throws Exception { statement.executeUpdate("insert into COUNTRIES (CODE,COUNTRY_NAME) values ('NO', 'Norway')"); statement.executeUpdate("insert into COUNTRIES (CODE,COUNTRY_NAME) values ('AL', 'Algeria')"); } + + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); // Load Solr new UpdateRequest() @@ -217,18 +222,25 @@ public void testJDBCSolrMerge() throws Exception { .withFunctionName("search", CloudSolrStream.class); List tuples; - - // Simple 1 - TupleStream jdbcStream = new JDBCStream("jdbc:hsqldb:mem:.", "select CODE,COUNTRY_NAME from COUNTRIES order by CODE", new FieldComparator("CODE", ComparatorOrder.ASCENDING)); - TupleStream selectStream = new SelectStream(jdbcStream, new HashMap(){{ put("CODE", "code_s"); put("COUNTRY_NAME", "name_s"); }}); - TupleStream searchStream = factory.constructStream("search(" + COLLECTIONORALIAS + ", fl=\"code_s,name_s\",q=\"*:*\",sort=\"code_s asc\")"); - TupleStream mergeStream = new MergeStream(new FieldComparator("code_s", ComparatorOrder.ASCENDING), new TupleStream[]{selectStream,searchStream}); - - tuples = getTuples(mergeStream); - - assertEquals(7, tuples.size()); - assertOrderOf(tuples, "code_s", "AL","CA","GB","NL","NO","NP","US"); - 
assertOrderOf(tuples, "name_s", "Algeria", "Canada", "Great Britian", "Netherlands", "Norway", "Nepal", "United States"); + + try { + // Simple 1 + TupleStream jdbcStream = new JDBCStream("jdbc:hsqldb:mem:.", "select CODE,COUNTRY_NAME from COUNTRIES order by CODE", new FieldComparator("CODE", ComparatorOrder.ASCENDING)); + TupleStream selectStream = new SelectStream(jdbcStream, new HashMap() {{ + put("CODE", "code_s"); + put("COUNTRY_NAME", "name_s"); + }}); + TupleStream searchStream = factory.constructStream("search(" + COLLECTIONORALIAS + ", fl=\"code_s,name_s\",q=\"*:*\",sort=\"code_s asc\")"); + TupleStream mergeStream = new MergeStream(new FieldComparator("code_s", ComparatorOrder.ASCENDING), new TupleStream[]{selectStream, searchStream}); + mergeStream.setStreamContext(streamContext); + tuples = getTuples(mergeStream); + + assertEquals(7, tuples.size()); + assertOrderOf(tuples, "code_s", "AL", "CA", "GB", "NL", "NO", "NP", "US"); + assertOrderOf(tuples, "name_s", "Algeria", "Canada", "Great Britian", "Netherlands", "Norway", "Nepal", "United States"); + } finally { + solrClientCache.close(); + } } @Test @@ -277,32 +289,41 @@ public void testJDBCSolrInnerJoinExpression() throws Exception{ String expression; TupleStream stream; List tuples; - - // Basic test - expression = - "innerJoin(" - + " select(" - + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," - + " personId_i as personId," - + " rating_f as rating" - + " )," - + " select(" - + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE order by PEOPLE.ID\", sort=\"ID asc\")," - + " ID as personId," - + " NAME as personName," - + " COUNTRY_NAME as country" - + " )," - + " on=\"personId\"" - + ")"; - - stream = factory.constructStream(expression); - tuples = getTuples(stream); - - assertEquals(10, tuples.size()); - assertOrderOf(tuples, "personId", 11,12,13,14,15,16,17,18,19,20); - assertOrderOf(tuples, "rating", 3.5d,5d,2.2d,4.3d,3.5d,3d,3d,4d,4.1d,4.8d); - assertOrderOf(tuples, "personName", "Emma","Grace","Hailey","Isabella","Lily","Madison","Mia","Natalie","Olivia","Samantha"); - assertOrderOf(tuples, "country", "Netherlands","United States","Netherlands","Netherlands","Netherlands","United States","United States","Netherlands","Netherlands","United States"); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + // Basic test + expression = + "innerJoin(" + + " select(" + + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," + + " personId_i as personId," + + " rating_f as rating" + + " )," + + " select(" + + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE order by PEOPLE.ID\", sort=\"ID asc\")," + + " ID as personId," + + " NAME as personName," + + " COUNTRY_NAME as country" + + " )," + + " on=\"personId\"" + + ")"; + + + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(10, tuples.size()); + assertOrderOf(tuples, "personId", 11, 12, 13, 14, 15, 16, 17, 18, 19, 20); + assertOrderOf(tuples, "rating", 3.5d, 5d, 2.2d, 4.3d, 3.5d, 3d, 3d, 4d, 4.1d, 4.8d); + 
assertOrderOf(tuples, "personName", "Emma", "Grace", "Hailey", "Isabella", "Lily", "Madison", "Mia", "Natalie", "Olivia", "Samantha"); + assertOrderOf(tuples, "country", "Netherlands", "United States", "Netherlands", "Netherlands", "Netherlands", "United States", "United States", "Netherlands", "Netherlands", "United States"); + } finally { + solrClientCache.close(); + } } @Test @@ -351,58 +372,67 @@ public void testJDBCSolrInnerJoinExpressionWithProperties() throws Exception{ String expression; TupleStream stream; List tuples; - - // Basic test for no alias - expression = - "innerJoin(" - + " select(" - + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," - + " personId_i as personId," - + " rating_f as rating" - + " )," - + " select(" - + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE order by PEOPLE.ID\", sort=\"ID asc\")," - + " ID as personId," - + " NAME as personName," - + " COUNTRY_NAME as country" - + " )," - + " on=\"personId\"" - + ")"; - - stream = factory.constructStream(expression); - tuples = getTuples(stream); - - assertEquals(10, tuples.size()); - assertOrderOf(tuples, "personId", 11,12,13,14,15,16,17,18,19,20); - assertOrderOf(tuples, "rating", 3.5d,5d,2.2d,4.3d,3.5d,3d,3d,4d,4.1d,4.8d); - assertOrderOf(tuples, "personName", "Emma","Grace","Hailey","Isabella","Lily","Madison","Mia","Natalie","Olivia","Samantha"); - assertOrderOf(tuples, "country", "Netherlands","United States","Netherlands","Netherlands","Netherlands","United States","United States","Netherlands","Netherlands","United States"); - - // Basic test for alias - expression = - "innerJoin(" - + " select(" - + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," - + " personId_i as personId," - + " rating_f as rating" - + " )," - + " select(" - + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID as PERSONID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE order by PEOPLE.ID\", sort=\"PERSONID asc\")," - + " PERSONID as personId," - + " NAME as personName," - + " COUNTRY_NAME as country" - + " )," - + " on=\"personId\"" - + ")"; - - stream = factory.constructStream(expression); - tuples = getTuples(stream); - - assertEquals(10, tuples.size()); - assertOrderOf(tuples, "personId", 11,12,13,14,15,16,17,18,19,20); - assertOrderOf(tuples, "rating", 3.5d,5d,2.2d,4.3d,3.5d,3d,3d,4d,4.1d,4.8d); - assertOrderOf(tuples, "personName", "Emma","Grace","Hailey","Isabella","Lily","Madison","Mia","Natalie","Olivia","Samantha"); - assertOrderOf(tuples, "country", "Netherlands","United States","Netherlands","Netherlands","Netherlands","United States","United States","Netherlands","Netherlands","United States"); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + // Basic test for no alias + expression = + "innerJoin(" + + " select(" + + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," + + " personId_i as personId," + + " rating_f as rating" + + " )," + + " select(" + + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE 
order by PEOPLE.ID\", sort=\"ID asc\")," + + " ID as personId," + + " NAME as personName," + + " COUNTRY_NAME as country" + + " )," + + " on=\"personId\"" + + ")"; + + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(10, tuples.size()); + assertOrderOf(tuples, "personId", 11, 12, 13, 14, 15, 16, 17, 18, 19, 20); + assertOrderOf(tuples, "rating", 3.5d, 5d, 2.2d, 4.3d, 3.5d, 3d, 3d, 4d, 4.1d, 4.8d); + assertOrderOf(tuples, "personName", "Emma", "Grace", "Hailey", "Isabella", "Lily", "Madison", "Mia", "Natalie", "Olivia", "Samantha"); + assertOrderOf(tuples, "country", "Netherlands", "United States", "Netherlands", "Netherlands", "Netherlands", "United States", "United States", "Netherlands", "Netherlands", "United States"); + + // Basic test for alias + expression = + "innerJoin(" + + " select(" + + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," + + " personId_i as personId," + + " rating_f as rating" + + " )," + + " select(" + + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID as PERSONID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE order by PEOPLE.ID\", sort=\"PERSONID asc\")," + + " PERSONID as personId," + + " NAME as personName," + + " COUNTRY_NAME as country" + + " )," + + " on=\"personId\"" + + ")"; + + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(10, tuples.size()); + assertOrderOf(tuples, "personId", 11, 12, 13, 14, 15, 16, 17, 18, 19, 20); + assertOrderOf(tuples, "rating", 3.5d, 5d, 2.2d, 4.3d, 3.5d, 3d, 3d, 4d, 4.1d, 4.8d); + assertOrderOf(tuples, "personName", "Emma", "Grace", "Hailey", "Isabella", "Lily", "Madison", "Mia", "Natalie", "Olivia", "Samantha"); + assertOrderOf(tuples, "country", "Netherlands", "United States", "Netherlands", "Netherlands", "Netherlands", "United States", "United States", "Netherlands", "Netherlands", "United States"); + } finally { + solrClientCache.close(); + } } @Test @@ -439,7 +469,7 @@ public void testJDBCSolrInnerJoinRollupExpression() throws Exception{ statement.executeUpdate("insert into PEOPLE (ID, NAME, COUNTRY_CODE) values (19,'Olivia','NL')"); statement.executeUpdate("insert into PEOPLE (ID, NAME, COUNTRY_CODE) values (20,'Samantha','US')"); } - + // Load solr data new UpdateRequest() .add(id, "1", "rating_f", "3.5", "personId_i", "11") @@ -457,50 +487,58 @@ public void testJDBCSolrInnerJoinRollupExpression() throws Exception{ String expression; TupleStream stream; List tuples; - - // Basic test - expression = - "rollup(" - + " hashJoin(" - + " hashed=select(" - + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," - + " personId_i as personId," - + " rating_f as rating" - + " )," - + " select(" - + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE order by COUNTRIES.COUNTRY_NAME\", sort=\"COUNTRIES.COUNTRY_NAME asc\")," - + " ID as personId," - + " NAME as personName," - + " COUNTRY_NAME as country" - + " )," - + " on=\"personId\"" - + " )," - + " over=\"country\"," - + " max(rating)," - + " min(rating)," - + " avg(rating)," - + " count(*)" - + ")"; - - stream = factory.constructStream(expression); - tuples = getTuples(stream); - - assertEquals(2, 
tuples.size()); - - Tuple tuple = tuples.get(0); - assertEquals("Netherlands",tuple.getString("country")); - assertTrue(4.3D == tuple.getDouble("max(rating)")); - assertTrue(2.2D == tuple.getDouble("min(rating)")); - assertTrue(3.6D == tuple.getDouble("avg(rating)")); - assertTrue(6D == tuple.getDouble("count(*)")); - - tuple = tuples.get(1); - assertEquals("United States",tuple.getString("country")); - assertTrue(5D == tuple.getDouble("max(rating)")); - assertTrue(3D == tuple.getDouble("min(rating)")); - assertTrue(3.95D == tuple.getDouble("avg(rating)")); - assertTrue(4D == tuple.getDouble("count(*)")); - + + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + // Basic test + expression = + "rollup(" + + " hashJoin(" + + " hashed=select(" + + " search(" + COLLECTIONORALIAS + ", fl=\"personId_i,rating_f\", q=\"rating_f:*\", sort=\"personId_i asc\")," + + " personId_i as personId," + + " rating_f as rating" + + " )," + + " select(" + + " jdbc(connection=\"jdbc:hsqldb:mem:.\", sql=\"select PEOPLE.ID, PEOPLE.NAME, COUNTRIES.COUNTRY_NAME from PEOPLE inner join COUNTRIES on PEOPLE.COUNTRY_CODE = COUNTRIES.CODE order by COUNTRIES.COUNTRY_NAME\", sort=\"COUNTRIES.COUNTRY_NAME asc\")," + + " ID as personId," + + " NAME as personName," + + " COUNTRY_NAME as country" + + " )," + + " on=\"personId\"" + + " )," + + " over=\"country\"," + + " max(rating)," + + " min(rating)," + + " avg(rating)," + + " count(*)" + + ")"; + + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(2, tuples.size()); + + Tuple tuple = tuples.get(0); + assertEquals("Netherlands", tuple.getString("country")); + assertTrue(4.3D == tuple.getDouble("max(rating)")); + assertTrue(2.2D == tuple.getDouble("min(rating)")); + assertTrue(3.6D == tuple.getDouble("avg(rating)")); + assertTrue(6D == tuple.getDouble("count(*)")); + + tuple = tuples.get(1); + assertEquals("United States", tuple.getString("country")); + assertTrue(5D == tuple.getDouble("max(rating)")); + assertTrue(3D == tuple.getDouble("min(rating)")); + assertTrue(3.95D == tuple.getDouble("avg(rating)")); + assertTrue(4D == tuple.getDouble("count(*)")); + } finally { + solrClientCache.close(); + } } @Test(expected=IOException.class) diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java index b91df8df5098..75bf92dd6278 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java @@ -24,6 +24,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.Slow; +import org.apache.solr.client.solrj.io.SolrClientCache; import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.eval.AddEvaluator; import org.apache.solr.client.solrj.io.eval.GreaterThanEvaluator; @@ -92,6 +93,9 @@ public void testSelectWithEvaluatorsStream() throws Exception { String clause; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", 
cluster.getZkServer().getZkAddress()) @@ -101,21 +105,24 @@ public void testSelectWithEvaluatorsStream() throws Exception { .withFunctionName("if", IfThenElseEvaluator.class) .withFunctionName("gt", GreaterThanEvaluator.class) ; - - // Basic test - clause = "select(" - + "id," - + "add(b_i,c_d) as result," - + "search(collection1, q=*:*, fl=\"id,a_s,b_i,c_d,d_b\", sort=\"id asc\")" - + ")"; - stream = factory.constructStream(clause); - tuples = getTuples(stream); - assertFields(tuples, "id", "result"); - assertNotFields(tuples, "a_s", "b_i", "c_d", "d_b"); - assertEquals(1, tuples.size()); - assertDouble(tuples.get(0), "result", 4.3); - assertEquals(4.3, tuples.get(0).get("result")); - + try { + // Basic test + clause = "select(" + + "id," + + "add(b_i,c_d) as result," + + "search(collection1, q=*:*, fl=\"id,a_s,b_i,c_d,d_b\", sort=\"id asc\")" + + ")"; + stream = factory.constructStream(clause); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertFields(tuples, "id", "result"); + assertNotFields(tuples, "a_s", "b_i", "c_d", "d_b"); + assertEquals(1, tuples.size()); + assertDouble(tuples.get(0), "result", 4.3); + assertEquals(4.3, tuples.get(0).get("result")); + } finally { + solrClientCache.close(); + } } protected List getTuples(TupleStream tupleStream) throws IOException { diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java index bb0bd7e101e8..c45977921ee5 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java @@ -127,65 +127,124 @@ public void testCloudSolrStream() throws Exception { StreamExpression expression; CloudSolrStream stream; List tuples; - - // Basic test - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 5); - assertOrder(tuples, 0, 2, 1, 3, 4); - assertLong(tuples.get(0), "a_i", 0); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); - // Basic w/aliases - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", aliases=\"a_i=alias.a_i, a_s=name\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); + try { + // Basic test + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assert(tuples.size() == 5); - assertOrder(tuples, 0, 2, 1, 3, 4); - assertLong(tuples.get(0), "alias.a_i", 0); - assertString(tuples.get(0), "name", "hello0"); + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + assertLong(tuples.get(0), "a_i", 0); - // Basic filtered test - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); + // Basic w/aliases + expression = 
StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", aliases=\"a_i=alias.a_i, a_s=name\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assert(tuples.size() == 3); - assertOrder(tuples, 0, 3, 4); - assertLong(tuples.get(1), "a_i", 3); + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + assertLong(tuples.get(0), "alias.a_i", 0); + assertString(tuples.get(0), "name", "hello0"); - try { - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); + // Basic filtered test + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); tuples = getTuples(stream); - throw new Exception("Should be an exception here"); - } catch(Exception e) { - assertTrue(e.getMessage().contains("q param expected for search function")); - } - try { - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"blah\", sort=\"a_f asc, a_i asc\")"); + assert (tuples.size() == 3); + assertOrder(tuples, 0, 3, 4); + assertLong(tuples.get(1), "a_i", 3); + + try { + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + throw new Exception("Should be an exception here"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("q param expected for search function")); + } + + try { + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"blah\", sort=\"a_f asc, a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + throw new Exception("Should be an exception here"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("fl param expected for search function")); + } + + try { + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"blah\", fl=\"id, a_f\", sort=\"a_f\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + throw new Exception("Should be an exception here"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("Invalid sort spec")); + } + + // Test with shards param + + List shardUrls = TupleStream.getShards(cluster.getZkServer().getZkAddress(), COLLECTIONORALIAS, streamContext); + + Map> shardsMap = new HashMap(); + shardsMap.put("myCollection", shardUrls); + StreamContext context = new StreamContext(); + context.put("shards", shardsMap); + context.setSolrClientCache(solrClientCache); + + // Basic test + expression = StreamExpressionParser.parse("search(myCollection, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(context); tuples = getTuples(stream); - throw new Exception("Should be an exception here"); - } catch(Exception e) { - assertTrue(e.getMessage().contains("fl param expected for search function")); - } + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + assertLong(tuples.get(0), "a_i", 0); - try { - expression = 
StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"blah\", fl=\"id, a_f\", sort=\"a_f\")"); - stream = new CloudSolrStream(expression, factory); + + //Execersise the /stream hander + + //Add the shards http parameter for the myCollection + StringBuilder buf = new StringBuilder(); + for (String shardUrl : shardUrls) { + if (buf.length() > 0) { + buf.append(","); + } + buf.append(shardUrl); + } + + ModifiableSolrParams solrParams = new ModifiableSolrParams(); + solrParams.add("qt", "/stream"); + solrParams.add("expr", "search(myCollection, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); + solrParams.add("myCollection.shards", buf.toString()); + SolrStream solrStream = new SolrStream(shardUrls.get(0), solrParams); + stream.setStreamContext(context); tuples = getTuples(stream); - throw new Exception("Should be an exception here"); - } catch(Exception e) { - assertTrue(e.getMessage().contains("Invalid sort spec")); - } + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + assertLong(tuples.get(0), "a_i", 0); + + } finally { + solrClientCache.close(); + } } + + + @Test public void testCloudSolrStreamWithZkHost() throws Exception { @@ -200,55 +259,66 @@ public void testCloudSolrStreamWithZkHost() throws Exception { StreamFactory factory = new StreamFactory(); StreamExpression expression; CloudSolrStream stream; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); List tuples; - - // Basic test - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", zkHost=" + cluster.getZkServer().getZkAddress() + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 5); - assertOrder(tuples, 0, 2, 1, 3, 4); - assertLong(tuples.get(0), "a_i", 0); + try { + // Basic test + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", zkHost=" + cluster.getZkServer().getZkAddress() + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - // Basic w/aliases - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", aliases=\"a_i=alias.a_i, a_s=name\", zkHost=" + cluster.getZkServer().getZkAddress() + ")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + assertLong(tuples.get(0), "a_i", 0); - assert(tuples.size() == 5); - assertOrder(tuples, 0, 2, 1, 3, 4); - assertLong(tuples.get(0), "alias.a_i", 0); - assertString(tuples.get(0), "name", "hello0"); + // Basic w/aliases + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", aliases=\"a_i=alias.a_i, a_s=name\", zkHost=" + cluster.getZkServer().getZkAddress() + ")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - // Basic filtered test - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", zkHost=" - + cluster.getZkServer().getZkAddress() + ", sort=\"a_f asc, a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - 
tuples = getTuples(stream); + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + assertLong(tuples.get(0), "alias.a_i", 0); + assertString(tuples.get(0), "name", "hello0"); - assert(tuples.size() == 3); - assertOrder(tuples, 0, 3, 4); - assertLong(tuples.get(1), "a_i", 3); + // Basic filtered test + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", zkHost=" + + cluster.getZkServer().getZkAddress() + ", sort=\"a_f asc, a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 3); + assertOrder(tuples, 0, 3, 4); + assertLong(tuples.get(1), "a_i", 3); - // Test a couple of multile field lists. - expression = StreamExpressionParser.parse("search(collection1, fq=\"a_s:hello0\", fq=\"a_s:hello1\", q=\"id:(*)\", " + - "zkHost=" + cluster.getZkServer().getZkAddress()+ ", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals("fq clauses should have prevented any docs from coming back", tuples.size(), 0); + // Test a couple of multile field lists. + expression = StreamExpressionParser.parse("search(collection1, fq=\"a_s:hello0\", fq=\"a_s:hello1\", q=\"id:(*)\", " + + "zkHost=" + cluster.getZkServer().getZkAddress() + ", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals("fq clauses should have prevented any docs from coming back", tuples.size(), 0); - expression = StreamExpressionParser.parse("search(collection1, fq=\"a_s:(hello0 OR hello1)\", q=\"id:(*)\", " + - "zkHost=" + cluster.getZkServer().getZkAddress() + ", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); + expression = StreamExpressionParser.parse("search(collection1, fq=\"a_s:(hello0 OR hello1)\", q=\"id:(*)\", " + + "zkHost=" + cluster.getZkServer().getZkAddress() + ", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assertEquals("Combining an f1 clause should show us 2 docs", tuples.size(), 2); - - + assertEquals("Combining an f1 clause should show us 2 docs", tuples.size(), 2); + + } finally { + solrClientCache.close(); + } } @@ -315,43 +385,53 @@ public void testUniqueStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("unique", UniqueStream.class); - - // Basic test - expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f\")"); - stream = new UniqueStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 4); - assertOrder(tuples, 0, 1, 3, 4); - // Basic test desc - expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, 
fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\"), over=\"a_f\")"); - stream = new UniqueStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 4); - assertOrder(tuples, 4, 3, 1, 2); - - // Basic w/multi comp - expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")"); - stream = new UniqueStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 5); - assertOrder(tuples, 0,2,1,3,4); - - // full factory w/multi comp - stream = factory.constructStream("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")"); - tuples = getTuples(stream); - - assert(tuples.size() == 5); - assertOrder(tuples, 0, 2, 1, 3, 4); + try { + // Basic test + expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f\")"); + stream = new UniqueStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 4); + assertOrder(tuples, 0, 1, 3, 4); + // Basic test desc + expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\"), over=\"a_f\")"); + stream = new UniqueStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 4); + assertOrder(tuples, 4, 3, 1, 2); + + // Basic w/multi comp + expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")"); + stream = new UniqueStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + + // full factory w/multi comp + stream = factory.constructStream("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + } finally { + solrClientCache.close(); + } } @Test @@ -369,30 +449,38 @@ public void testSortStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; - - StreamFactory factory = new StreamFactory() - .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) - .withFunctionName("search", CloudSolrStream.class) - .withFunctionName("sort", SortStream.class); - - // Basic test - stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")"); - tuples = getTuples(stream); - assert(tuples.size() == 6); - assertOrder(tuples, 0, 1, 5, 2, 3, 4); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + StreamFactory factory = new StreamFactory() + .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) + .withFunctionName("search", CloudSolrStream.class) + .withFunctionName("sort", SortStream.class); - // Basic test desc - stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i desc\")"); - 
tuples = getTuples(stream); - assert(tuples.size() == 6); - assertOrder(tuples, 4,3,2,1,5,0); - - // Basic w/multi comp - stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc, a_f desc\")"); - tuples = getTuples(stream); - assert(tuples.size() == 6); - assertOrder(tuples, 0,5,1,2,3,4); + // Basic test + stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 6); + assertOrder(tuples, 0, 1, 5, 2, 3, 4); + // Basic test desc + stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i desc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 6); + assertOrder(tuples, 4, 3, 2, 1, 5, 0); + + // Basic w/multi comp + stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc, a_f desc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 6); + assertOrder(tuples, 0, 5, 1, 2, 3, 4); + } finally { + solrClientCache.close(); + } } @@ -411,17 +499,24 @@ public void testNullStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("null", NullStream.class); - // Basic test - stream = factory.constructStream("null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")"); - tuples = getTuples(stream); - assertTrue(tuples.size() == 1); - assertTrue(tuples.get(0).getLong("nullCount") == 6); + try { + // Basic test + stream = factory.constructStream("null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertTrue(tuples.size() == 1); + assertTrue(tuples.get(0).getLong("nullCount") == 6); + } finally { + solrClientCache.close(); + } } @@ -440,6 +535,9 @@ public void testParallelNullStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) @@ -447,24 +545,29 @@ public void testParallelNullStream() throws Exception { .withFunctionName("null", NullStream.class) .withFunctionName("parallel", ParallelStream.class); - // Basic test - stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"nullCount desc\", null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), by=\"a_i asc\"))"); - tuples = getTuples(stream); - assertTrue(tuples.size() == 2); - long nullCount = 0; - for(Tuple t : tuples) { - nullCount += t.getLong("nullCount"); - } + try { - 
assertEquals(nullCount, 6L); - } + // Basic test + stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"nullCount desc\", null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), by=\"a_i asc\"))"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertTrue(tuples.size() == 2); + long nullCount = 0; + for (Tuple t : tuples) { + nullCount += t.getLong("nullCount"); + } + assertEquals(nullCount, 6L); + } finally { + solrClientCache.close(); + } + } @Test public void testNulls() throws Exception { new UpdateRequest() - .add(id, "0", "a_i", "1", "a_f", "0", "s_multi", "aaa", "s_multi", "bbb", "i_multi", "100", "i_multi", "200") + .add(id, "0", "a_i", "1", "a_f", "0", "s_multi", "aaa", "s_multi", "bbb", "i_multi", "100", "i_multi", "200") .add(id, "2", "a_s", "hello2", "a_i", "3", "a_f", "0") .add(id, "3", "a_s", "hello3", "a_i", "4", "a_f", "3") .add(id, "4", "a_s", "hello4", "a_f", "4") @@ -475,49 +578,59 @@ public void testNulls() throws Exception { TupleStream stream; List tuples; Tuple tuple; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class); - // Basic test - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f, s_multi, i_multi\", qt=\"/export\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); + try { + // Basic test + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f, s_multi, i_multi\", qt=\"/export\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assert(tuples.size() == 5); - assertOrder(tuples, 4, 0, 1, 2, 3); + assert (tuples.size() == 5); + assertOrder(tuples, 4, 0, 1, 2, 3); - tuple = tuples.get(0); - assertTrue("hello4".equals(tuple.getString("a_s"))); - assertNull(tuple.get("s_multi")); - assertNull(tuple.get("i_multi")); - assertNull(tuple.getLong("a_i")); + tuple = tuples.get(0); + assertTrue("hello4".equals(tuple.getString("a_s"))); + assertNull(tuple.get("s_multi")); + assertNull(tuple.get("i_multi")); + assertNull(tuple.getLong("a_i")); - tuple = tuples.get(1); - assertNull(tuple.get("a_s")); - List strings = tuple.getStrings("s_multi"); - assertNotNull(strings); - assertEquals("aaa", strings.get(0)); - assertEquals("bbb", strings.get(1)); - List longs = tuple.getLongs("i_multi"); - assertNotNull(longs); - - //test sort (asc) with null string field. Null should sort to the top. - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f, s_multi, i_multi\", qt=\"/export\", sort=\"a_s asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); + tuple = tuples.get(1); + assertNull(tuple.get("a_s")); + List strings = tuple.getStrings("s_multi"); + assertNotNull(strings); + assertEquals("aaa", strings.get(0)); + assertEquals("bbb", strings.get(1)); + List longs = tuple.getLongs("i_multi"); + assertNotNull(longs); - assert(tuples.size() == 5); - assertOrder(tuples, 0, 1, 2, 3, 4); + //test sort (asc) with null string field. 
Null should sort to the top. + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f, s_multi, i_multi\", qt=\"/export\", sort=\"a_s asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - //test sort(desc) with null string field. Null should sort to the bottom. - expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f, s_multi, i_multi\", qt=\"/export\", sort=\"a_s desc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); + assert (tuples.size() == 5); + assertOrder(tuples, 0, 1, 2, 3, 4); - assert(tuples.size() == 5); - assertOrder(tuples, 4, 3, 2, 1, 0); + //test sort(desc) with null string field. Null should sort to the bottom. + expression = StreamExpressionParser.parse("search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f, s_multi, i_multi\", qt=\"/export\", sort=\"a_s desc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 5); + assertOrder(tuples, 4, 3, 2, 1, 0); + } finally { + solrClientCache.close(); + } } @Test @@ -546,126 +659,147 @@ public void testMergeStream() throws Exception { + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\")," + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\")," + "on=\"a_f asc\")"); - stream = new MergeStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 4); - assertOrder(tuples, 0, 1, 3, 4); - // Basic test desc - expression = StreamExpressionParser.parse("merge(" - + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\")," - + "on=\"a_f desc\")"); - stream = new MergeStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 4); - assertOrder(tuples, 4, 3, 1, 0); - - // Basic w/multi comp - expression = StreamExpressionParser.parse("merge(" - + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," - + "on=\"a_f asc, a_s asc\")"); stream = new MergeStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 5); - assertOrder(tuples, 0, 2, 1, 3, 4); - - // full factory w/multi comp - stream = factory.constructStream("merge(" - + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," - + "on=\"a_f asc, a_s asc\")"); - tuples = getTuples(stream); - - assert(tuples.size() == 5); - assertOrder(tuples, 0, 2, 1, 3, 4); - - // full factory w/multi streams - stream = factory.constructStream("merge(" - + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"id:(2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," - + "on=\"a_f asc\")"); - tuples = getTuples(stream); - - assert(tuples.size() == 4); - assertOrder(tuples, 0, 2, 
1, 4); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - } + assert (tuples.size() == 4); + assertOrder(tuples, 0, 1, 3, 4); - @Test - public void testRankStream() throws Exception { + // Basic test desc + expression = StreamExpressionParser.parse("merge(" + + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\")," + + "on=\"a_f desc\")"); + stream = new MergeStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - new UpdateRequest() - .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0") - .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0") - .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3") - .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4") - .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1") - .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + assert (tuples.size() == 4); + assertOrder(tuples, 4, 3, 1, 0); + + // Basic w/multi comp + expression = StreamExpressionParser.parse("merge(" + + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," + + "on=\"a_f asc, a_s asc\")"); + stream = new MergeStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + + // full factory w/multi comp + stream = factory.constructStream("merge(" + + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," + + "on=\"a_f asc, a_s asc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 5); + assertOrder(tuples, 0, 2, 1, 3, 4); + + // full factory w/multi streams + stream = factory.constructStream("merge(" + + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"id:(2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\")," + + "on=\"a_f asc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 4); + assertOrder(tuples, 0, 2, 1, 4); + } finally { + solrClientCache.close(); + } + } + + @Test + public void testRankStream() throws Exception { + + new UpdateRequest() + .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0") + .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0") + .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3") + .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4") + .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1") + .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + + StreamExpression expression; + TupleStream stream; + List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); - StreamExpression expression; - TupleStream stream; - List tuples; - 
StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("unique", UniqueStream.class) .withFunctionName("top", RankStream.class); - - // Basic test - expression = StreamExpressionParser.parse("top(" - + "n=3," - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")," - + "sort=\"a_f asc, a_i asc\")"); - stream = new RankStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 3); - assertOrder(tuples, 0, 2, 1); + try { + // Basic test + expression = StreamExpressionParser.parse("top(" + + "n=3," + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")," + + "sort=\"a_f asc, a_i asc\")"); + stream = new RankStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - // Basic test desc - expression = StreamExpressionParser.parse("top(" - + "n=2," - + "unique(" - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\")," - + "over=\"a_f\")," - + "sort=\"a_f desc\")"); - stream = new RankStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 2); - assertOrder(tuples, 4, 3); - - // full factory - stream = factory.constructStream("top(" - + "n=4," - + "unique(" - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")," - + "over=\"a_f\")," - + "sort=\"a_f asc\")"); - tuples = getTuples(stream); - - assert(tuples.size() == 4); - assertOrder(tuples, 0,1,3,4); + assert (tuples.size() == 3); + assertOrder(tuples, 0, 2, 1); + + // Basic test desc + expression = StreamExpressionParser.parse("top(" + + "n=2," + + "unique(" + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\")," + + "over=\"a_f\")," + + "sort=\"a_f desc\")"); + stream = new RankStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - // full factory, switch order - stream = factory.constructStream("top(" - + "n=4," - + "unique(" - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\")," - + "over=\"a_f\")," - + "sort=\"a_f asc\")"); - tuples = getTuples(stream); - - assert(tuples.size() == 4); - assertOrder(tuples, 2,1,3,4); + assert (tuples.size() == 2); + assertOrder(tuples, 4, 3); + + // full factory + stream = factory.constructStream("top(" + + "n=4," + + "unique(" + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")," + + "over=\"a_f\")," + + "sort=\"a_f asc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 4); + assertOrder(tuples, 0, 1, 3, 4); + + // full factory, switch order + stream = factory.constructStream("top(" + + "n=4," + + "unique(" + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\")," + + "over=\"a_f\")," + + "sort=\"a_f asc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 4); + assertOrder(tuples, 2, 1, 3, 4); + } finally { + solrClientCache.close(); + } } @Test @@ -735,7 +869,7 @@ public void testRandomStream() throws Exception { //Exercise the /stream handler ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream")); - sParams.add("expr", "random(" + COLLECTIONORALIAS 
+ ", q=\"*:*\", rows=\"1\", fl=\"id, a_i\")" ); + sParams.add("expr", "random(" + COLLECTIONORALIAS + ", q=\"*:*\", rows=\"1\", fl=\"id, a_i\")"); JettySolrRunner jetty = cluster.getJettySolrRunner(0); SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams); List tuples4 = getTuples(solrStream); @@ -767,61 +901,69 @@ public void testReducerStream() throws Exception { List tuples; Tuple t0, t1, t2; List maps0, maps1, maps2; - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("reduce", ReducerStream.class) .withFunctionName("group", GroupOperation.class); - // basic - expression = StreamExpressionParser.parse("reduce(" - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f asc\")," - + "by=\"a_s\"," - + "group(sort=\"a_f desc\", n=\"4\"))"); + try { + // basic + expression = StreamExpressionParser.parse("reduce(" + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f asc\")," + + "by=\"a_s\"," + + "group(sort=\"a_f desc\", n=\"4\"))"); - stream = factory.constructStream(expression); - tuples = getTuples(stream); + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assert(tuples.size() == 3); + assert (tuples.size() == 3); - t0 = tuples.get(0); - maps0 = t0.getMaps("group"); - assertMaps(maps0, 9, 1, 2, 0); + t0 = tuples.get(0); + maps0 = t0.getMaps("group"); + assertMaps(maps0, 9, 1, 2, 0); - t1 = tuples.get(1); - maps1 = t1.getMaps("group"); - assertMaps(maps1, 8, 7, 5, 3); + t1 = tuples.get(1); + maps1 = t1.getMaps("group"); + assertMaps(maps1, 8, 7, 5, 3); - t2 = tuples.get(2); - maps2 = t2.getMaps("group"); - assertMaps(maps2, 6, 4); - - // basic w/spaces - expression = StreamExpressionParser.parse("reduce(" - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f asc\")," - + "by=\"a_s\"," + - "group(sort=\"a_i asc\", n=\"2\"))"); - stream = factory.constructStream(expression); - tuples = getTuples(stream); + t2 = tuples.get(2); + maps2 = t2.getMaps("group"); + assertMaps(maps2, 6, 4); - assert(tuples.size() == 3); + // basic w/spaces + expression = StreamExpressionParser.parse("reduce(" + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f asc\")," + + "by=\"a_s\"," + + "group(sort=\"a_i asc\", n=\"2\"))"); + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - t0 = tuples.get(0); - maps0 = t0.getMaps("group"); - assert(maps0.size() == 2); + assert (tuples.size() == 3); - assertMaps(maps0, 0, 1); + t0 = tuples.get(0); + maps0 = t0.getMaps("group"); + assert (maps0.size() == 2); - t1 = tuples.get(1); - maps1 = t1.getMaps("group"); - assertMaps(maps1, 3, 5); + assertMaps(maps0, 0, 1); - t2 = tuples.get(2); - maps2 = t2.getMaps("group"); - assertMaps(maps2, 4, 6); + t1 = tuples.get(1); + maps1 = t1.getMaps("group"); + assertMaps(maps1, 3, 5); + t2 = tuples.get(2); + maps2 = t2.getMaps("group"); + assertMaps(maps2, 4, 6); + } finally { + solrClientCache.close(); + } } @@ -1158,6 +1300,10 @@ public void testParallelFetchStream() throws Exception { .add(id, "9", "a_s", "hello0", 
"a_i", "9", "a_f", "10", "subject", "blah blah blah 9") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + TupleStream stream; List tuples; @@ -1167,57 +1313,63 @@ public void testParallelFetchStream() throws Exception { .withFunctionName("parallel", ParallelStream.class) .withFunctionName("fetch", FetchStream.class); - stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ", search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"2\", fl=\"subject\"))"); - tuples = getTuples(stream); - - assert(tuples.size() == 10); - Tuple t = tuples.get(0); - assertTrue("blah blah blah 0".equals(t.getString("subject"))); - t = tuples.get(1); - assertTrue("blah blah blah 2".equals(t.getString("subject"))); - t = tuples.get(2); - assertTrue("blah blah blah 3".equals(t.getString("subject"))); - t = tuples.get(3); - assertTrue("blah blah blah 4".equals(t.getString("subject"))); - t = tuples.get(4); - assertTrue("blah blah blah 1".equals(t.getString("subject"))); - t = tuples.get(5); - assertTrue("blah blah blah 5".equals(t.getString("subject"))); - t = tuples.get(6); - assertTrue("blah blah blah 6".equals(t.getString("subject"))); - t = tuples.get(7); - assertTrue("blah blah blah 7".equals(t.getString("subject"))); - t = tuples.get(8); - assertTrue("blah blah blah 8".equals(t.getString("subject"))); - t = tuples.get(9); - assertTrue("blah blah blah 9".equals(t.getString("subject"))); - + try { - stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ", search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"3\", fl=\"subject\"))"); - tuples = getTuples(stream); + stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ", search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"2\", fl=\"subject\"))"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assert(tuples.size() == 10); - t = tuples.get(0); - assertTrue("blah blah blah 0".equals(t.getString("subject"))); - t = tuples.get(1); - assertTrue("blah blah blah 2".equals(t.getString("subject"))); - t = tuples.get(2); - assertTrue("blah blah blah 3".equals(t.getString("subject"))); - t = tuples.get(3); - assertTrue("blah blah blah 4".equals(t.getString("subject"))); - t = tuples.get(4); - assertTrue("blah blah blah 1".equals(t.getString("subject"))); - t = tuples.get(5); - assertTrue("blah blah blah 5".equals(t.getString("subject"))); - t = tuples.get(6); - assertTrue("blah blah blah 6".equals(t.getString("subject"))); - t = tuples.get(7); - assertTrue("blah blah blah 7".equals(t.getString("subject"))); - t = tuples.get(8); - assertTrue("blah blah blah 8".equals(t.getString("subject"))); - t = tuples.get(9); - assertTrue("blah blah blah 9".equals(t.getString("subject"))); + assert (tuples.size() == 10); + Tuple t = tuples.get(0); + assertTrue("blah blah blah 0".equals(t.getString("subject"))); + t = tuples.get(1); + assertTrue("blah blah blah 2".equals(t.getString("subject"))); + t = tuples.get(2); + assertTrue("blah blah blah 
3".equals(t.getString("subject"))); + t = tuples.get(3); + assertTrue("blah blah blah 4".equals(t.getString("subject"))); + t = tuples.get(4); + assertTrue("blah blah blah 1".equals(t.getString("subject"))); + t = tuples.get(5); + assertTrue("blah blah blah 5".equals(t.getString("subject"))); + t = tuples.get(6); + assertTrue("blah blah blah 6".equals(t.getString("subject"))); + t = tuples.get(7); + assertTrue("blah blah blah 7".equals(t.getString("subject"))); + t = tuples.get(8); + assertTrue("blah blah blah 8".equals(t.getString("subject"))); + t = tuples.get(9); + assertTrue("blah blah blah 9".equals(t.getString("subject"))); + + + stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ", search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"3\", fl=\"subject\"))"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 10); + t = tuples.get(0); + assertTrue("blah blah blah 0".equals(t.getString("subject"))); + t = tuples.get(1); + assertTrue("blah blah blah 2".equals(t.getString("subject"))); + t = tuples.get(2); + assertTrue("blah blah blah 3".equals(t.getString("subject"))); + t = tuples.get(3); + assertTrue("blah blah blah 4".equals(t.getString("subject"))); + t = tuples.get(4); + assertTrue("blah blah blah 1".equals(t.getString("subject"))); + t = tuples.get(5); + assertTrue("blah blah blah 5".equals(t.getString("subject"))); + t = tuples.get(6); + assertTrue("blah blah blah 6".equals(t.getString("subject"))); + t = tuples.get(7); + assertTrue("blah blah blah 7".equals(t.getString("subject"))); + t = tuples.get(8); + assertTrue("blah blah blah 8".equals(t.getString("subject"))); + t = tuples.get(9); + assertTrue("blah blah blah 9".equals(t.getString("subject"))); + } finally { + solrClientCache.close(); + } } @@ -1260,87 +1412,91 @@ public void testDaemonStream() throws Exception { + "sum(a_i)" + "), id=\"test\", runInterval=\"1000\", queueSize=\"9\")"); daemonStream = (DaemonStream)factory.constructStream(expression); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + daemonStream.setStreamContext(streamContext); + try { + //Test Long and Double Sums + daemonStream.open(); // This will start the daemon thread - //Test Long and Double Sums - - daemonStream.open(); // This will start the daemon thread - - for(int i=0; i<4; i++) { - Tuple tuple = daemonStream.read(); // Reads from the queue - String bucket = tuple.getString("a_s"); - Double sumi = tuple.getDouble("sum(a_i)"); - - //System.out.println("#################################### Bucket 1:"+bucket); - assertTrue(bucket.equals("hello0")); - assertTrue(sumi.doubleValue() == 17.0D); - - tuple = daemonStream.read(); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - - //System.out.println("#################################### Bucket 2:"+bucket); - assertTrue(bucket.equals("hello3")); - assertTrue(sumi.doubleValue() == 38.0D); + for (int i = 0; i < 4; i++) { + Tuple tuple = daemonStream.read(); // Reads from the queue + String bucket = tuple.getString("a_s"); + Double sumi = tuple.getDouble("sum(a_i)"); - tuple = daemonStream.read(); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - //System.out.println("#################################### Bucket 3:"+bucket); - 
assertTrue(bucket.equals("hello4")); - assertTrue(sumi.longValue() == 15); - } + //System.out.println("#################################### Bucket 1:"+bucket); + assertTrue(bucket.equals("hello0")); + assertTrue(sumi.doubleValue() == 17.0D); - //Now lets wait until the internal queue fills up + tuple = daemonStream.read(); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); - while(daemonStream.remainingCapacity() > 0) { - try { - Thread.sleep(1000); - } catch (Exception e) { + //System.out.println("#################################### Bucket 2:"+bucket); + assertTrue(bucket.equals("hello3")); + assertTrue(sumi.doubleValue() == 38.0D); + tuple = daemonStream.read(); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + //System.out.println("#################################### Bucket 3:"+bucket); + assertTrue(bucket.equals("hello4")); + assertTrue(sumi.longValue() == 15); } - } - - //OK capacity is full, let's index a new doc - new UpdateRequest() - .add(id, "10", "a_s", "hello0", "a_i", "1", "a_f", "10") - .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + //Now lets wait until the internal queue fills up - //Now lets clear the existing docs in the queue 9, plus 3 more to get passed the run that was blocked. The next run should - //have the tuples with the updated count. - for(int i=0; i<12;i++) { - daemonStream.read(); - } + while (daemonStream.remainingCapacity() > 0) { + try { + Thread.sleep(1000); + } catch (Exception e) { - //And rerun the loop. It should have a new count for hello0 - for(int i=0; i<4; i++) { - Tuple tuple = daemonStream.read(); // Reads from the queue - String bucket = tuple.getString("a_s"); - Double sumi = tuple.getDouble("sum(a_i)"); + } + } - //System.out.println("#################################### Bucket 1:"+bucket); - assertTrue(bucket.equals("hello0")); - assertTrue(sumi.doubleValue() == 18.0D); + //OK capacity is full, let's index a new doc - tuple = daemonStream.read(); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); + new UpdateRequest() + .add(id, "10", "a_s", "hello0", "a_i", "1", "a_f", "10") + .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //System.out.println("#################################### Bucket 2:"+bucket); - assertTrue(bucket.equals("hello3")); - assertTrue(sumi.doubleValue() == 38.0D); + //Now lets clear the existing docs in the queue 9, plus 3 more to get passed the run that was blocked. The next run should + //have the tuples with the updated count. + for (int i = 0; i < 12; i++) { + daemonStream.read(); + } - tuple = daemonStream.read(); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - //System.out.println("#################################### Bucket 3:"+bucket); - assertTrue(bucket.equals("hello4")); - assertTrue(sumi.longValue() == 15); + //And rerun the loop. 
It should have a new count for hello0 + for (int i = 0; i < 4; i++) { + Tuple tuple = daemonStream.read(); // Reads from the queue + String bucket = tuple.getString("a_s"); + Double sumi = tuple.getDouble("sum(a_i)"); + + //System.out.println("#################################### Bucket 1:"+bucket); + assertTrue(bucket.equals("hello0")); + assertTrue(sumi.doubleValue() == 18.0D); + + tuple = daemonStream.read(); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + + //System.out.println("#################################### Bucket 2:"+bucket); + assertTrue(bucket.equals("hello3")); + assertTrue(sumi.doubleValue() == 38.0D); + + tuple = daemonStream.read(); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + //System.out.println("#################################### Bucket 3:"+bucket); + assertTrue(bucket.equals("hello4")); + assertTrue(sumi.longValue() == 15); + } + } finally { + daemonStream.close(); //This should stop the daemon thread + solrClientCache.close(); } - - daemonStream.close(); //This should stop the daemon thread - } @@ -1412,96 +1568,103 @@ public void testRollupStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + expression = StreamExpressionParser.parse("rollup(" + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"a_s,a_i,a_f\", sort=\"a_s asc\")," + + "over=\"a_s\"," + + "sum(a_i)," + + "sum(a_f)," + + "min(a_i)," + + "min(a_f)," + + "max(a_i)," + + "max(a_f)," + + "avg(a_i)," + + "avg(a_f)," + + "count(*)," + + ")"); + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - expression = StreamExpressionParser.parse("rollup(" - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"a_s,a_i,a_f\", sort=\"a_s asc\")," - + "over=\"a_s\"," - + "sum(a_i)," - + "sum(a_f)," - + "min(a_i)," - + "min(a_f)," - + "max(a_i)," - + "max(a_f)," - + "avg(a_i)," - + "avg(a_f)," - + "count(*)," - + ")"); - stream = factory.constructStream(expression); - tuples = getTuples(stream); - - assert(tuples.size() == 3); + assert (tuples.size() == 3); - //Test Long and Double Sums + //Test Long and Double Sums - Tuple tuple = tuples.get(0); - String bucket = tuple.getString("a_s"); - Double sumi = tuple.getDouble("sum(a_i)"); - Double sumf = tuple.getDouble("sum(a_f)"); - Double mini = tuple.getDouble("min(a_i)"); - Double minf = tuple.getDouble("min(a_f)"); - Double maxi = tuple.getDouble("max(a_i)"); - Double maxf = tuple.getDouble("max(a_f)"); - Double avgi = tuple.getDouble("avg(a_i)"); - Double avgf = tuple.getDouble("avg(a_f)"); - Double count = tuple.getDouble("count(*)"); + Tuple tuple = tuples.get(0); + String bucket = tuple.getString("a_s"); + Double sumi = tuple.getDouble("sum(a_i)"); + Double sumf = tuple.getDouble("sum(a_f)"); + Double mini = tuple.getDouble("min(a_i)"); + Double minf = tuple.getDouble("min(a_f)"); + Double maxi = tuple.getDouble("max(a_i)"); + Double maxf = tuple.getDouble("max(a_f)"); + Double avgi = tuple.getDouble("avg(a_i)"); + Double avgf = tuple.getDouble("avg(a_f)"); + Double count = tuple.getDouble("count(*)"); - assertTrue(bucket.equals("hello0")); - assertTrue(sumi.doubleValue() == 17.0D); - assertTrue(sumf.doubleValue() == 18.0D); - assertTrue(mini.doubleValue() == 0.0D); - assertTrue(minf.doubleValue() == 1.0D); - assertTrue(maxi.doubleValue() == 
14.0D); - assertTrue(maxf.doubleValue() == 10.0D); - assertTrue(avgi.doubleValue() == 4.25D); - assertTrue(avgf.doubleValue() == 4.5D); - assertTrue(count.doubleValue() == 4); + assertTrue(bucket.equals("hello0")); + assertTrue(sumi.doubleValue() == 17.0D); + assertTrue(sumf.doubleValue() == 18.0D); + assertTrue(mini.doubleValue() == 0.0D); + assertTrue(minf.doubleValue() == 1.0D); + assertTrue(maxi.doubleValue() == 14.0D); + assertTrue(maxf.doubleValue() == 10.0D); + assertTrue(avgi.doubleValue() == 4.25D); + assertTrue(avgf.doubleValue() == 4.5D); + assertTrue(count.doubleValue() == 4); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); + assertTrue(bucket.equals("hello3")); + assertTrue(sumi.doubleValue() == 38.0D); + assertTrue(sumf.doubleValue() == 26.0D); + assertTrue(mini.doubleValue() == 3.0D); + assertTrue(minf.doubleValue() == 3.0D); + assertTrue(maxi.doubleValue() == 13.0D); + assertTrue(maxf.doubleValue() == 9.0D); + assertTrue(avgi.doubleValue() == 9.5D); + assertTrue(avgf.doubleValue() == 6.5D); + assertTrue(count.doubleValue() == 4); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); - assertTrue(bucket.equals("hello3")); - assertTrue(sumi.doubleValue() == 38.0D); - assertTrue(sumf.doubleValue() == 26.0D); - assertTrue(mini.doubleValue() == 3.0D); - assertTrue(minf.doubleValue() == 3.0D); - assertTrue(maxi.doubleValue() == 13.0D); - assertTrue(maxf.doubleValue() == 9.0D); - assertTrue(avgi.doubleValue() == 9.5D); - assertTrue(avgf.doubleValue() == 6.5D); - assertTrue(count.doubleValue() == 4); - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertTrue(bucket.equals("hello4")); - assertTrue(sumi.longValue() == 15); - assertTrue(sumf.doubleValue() == 11.0D); - assertTrue(mini.doubleValue() == 4.0D); - assertTrue(minf.doubleValue() == 4.0D); - assertTrue(maxi.doubleValue() == 11.0D); - assertTrue(maxf.doubleValue() == 7.0D); - assertTrue(avgi.doubleValue() == 7.5D); - assertTrue(avgf.doubleValue() == 5.5D); - assertTrue(count.doubleValue() == 2); + assertTrue(bucket.equals("hello4")); + assertTrue(sumi.longValue() == 15); + 
assertTrue(sumf.doubleValue() == 11.0D); + assertTrue(mini.doubleValue() == 4.0D); + assertTrue(minf.doubleValue() == 4.0D); + assertTrue(maxi.doubleValue() == 11.0D); + assertTrue(maxf.doubleValue() == 7.0D); + assertTrue(avgi.doubleValue() == 7.5D); + assertTrue(avgf.doubleValue() == 5.5D); + assertTrue(count.doubleValue() == 2); + } finally { + solrClientCache.close(); + } } @Test @@ -1588,18 +1751,27 @@ public void testParallelUniqueStream() throws Exception { .withFunctionName("top", RankStream.class) .withFunctionName("group", ReducerStream.class) .withFunctionName("parallel", ParallelStream.class); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + - ParallelStream pstream = (ParallelStream)streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\""+zkHost+"\", sort=\"a_f asc\")"); - List tuples = getTuples(pstream); - assert(tuples.size() == 5); - assertOrder(tuples, 0, 1, 3, 4, 6); + try { - //Test the eofTuples + ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")"); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); + assert (tuples.size() == 5); + assertOrder(tuples, 0, 1, 3, 4, 6); - Map eofTuples = pstream.getEofTuples(); - assert(eofTuples.size() == 2); //There should be an EOF tuple for each worker. + //Test the eofTuples + Map eofTuples = pstream.getEofTuples(); + assert (eofTuples.size() == 2); //There should be an EOF tuple for each worker. + } finally { + solrClientCache.close(); + } } @Test @@ -1666,23 +1838,32 @@ public void testParallelShuffleStream() throws Exception { .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + String zkHost = cluster.getZkServer().getZkAddress(); StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost) .withFunctionName("shuffle", ShuffleStream.class) .withFunctionName("unique", UniqueStream.class) .withFunctionName("parallel", ParallelStream.class); - ParallelStream pstream = (ParallelStream)streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(shuffle(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\""+zkHost+"\", sort=\"a_f asc\")"); - - List tuples = getTuples(pstream); - assert(tuples.size() == 6); - assertOrder(tuples, 0, 1, 3, 4, 6, 56); - - //Test the eofTuples - - Map eofTuples = pstream.getEofTuples(); - assert(eofTuples.size() == 2); //There should be an EOF tuple for each worker. 
- assert(pstream.toExpression(streamFactory).toString().contains("shuffle")); + try { + ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(shuffle(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")"); + pstream.setStreamFactory(streamFactory); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); + assert (tuples.size() == 6); + assertOrder(tuples, 0, 1, 3, 4, 6, 56); + + //Test the eofTuples + + Map eofTuples = pstream.getEofTuples(); + assert (eofTuples.size() == 2); //There should be an EOF tuple for each worker. + assert (pstream.toExpression(streamFactory).toString().contains("shuffle")); + } finally { + solrClientCache.close(); + } } @@ -1702,6 +1883,11 @@ public void testParallelReducerStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + String zkHost = cluster.getZkServer().getZkAddress(); StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost) .withFunctionName("search", CloudSolrStream.class) @@ -1709,54 +1895,62 @@ public void testParallelReducerStream() throws Exception { .withFunctionName("reduce", ReducerStream.class) .withFunctionName("parallel", ParallelStream.class); - ParallelStream pstream = (ParallelStream)streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", " + - "reduce(" + - "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc,a_f asc\", partitionKeys=\"a_s\"), " + - "by=\"a_s\"," + - "group(sort=\"a_i asc\", n=\"5\")), " + - "workers=\"2\", zkHost=\""+zkHost+"\", sort=\"a_s asc\")"); - List tuples = getTuples(pstream); + try { + ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", " + + "reduce(" + + "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc,a_f asc\", partitionKeys=\"a_s\"), " + + "by=\"a_s\"," + + "group(sort=\"a_i asc\", n=\"5\")), " + + "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_s asc\")"); + + pstream.setStreamContext(streamContext); - assert(tuples.size() == 3); + List tuples = getTuples(pstream); - Tuple t0 = tuples.get(0); - List maps0 = t0.getMaps("group"); - assertMaps(maps0, 0, 1, 2, 9); + assert (tuples.size() == 3); - Tuple t1 = tuples.get(1); - List maps1 = t1.getMaps("group"); - assertMaps(maps1, 3, 5, 7, 8); + Tuple t0 = tuples.get(0); + List maps0 = t0.getMaps("group"); + assertMaps(maps0, 0, 1, 2, 9); - Tuple t2 = tuples.get(2); - List maps2 = t2.getMaps("group"); - assertMaps(maps2, 4, 6); + Tuple t1 = tuples.get(1); + List maps1 = t1.getMaps("group"); + assertMaps(maps1, 3, 5, 7, 8); + Tuple t2 = tuples.get(2); + List maps2 = t2.getMaps("group"); + assertMaps(maps2, 4, 6); - pstream = (ParallelStream)streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", " + - "reduce(" + - "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i,a_f\", sort=\"a_s desc,a_f asc\", partitionKeys=\"a_s\"), " + - "by=\"a_s\", " + - "group(sort=\"a_i desc\", n=\"5\")),"+ - "workers=\"2\", zkHost=\""+zkHost+"\", sort=\"a_s desc\")"); - tuples = getTuples(pstream); + pstream = (ParallelStream) 
streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", " + + "reduce(" + + "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i,a_f\", sort=\"a_s desc,a_f asc\", partitionKeys=\"a_s\"), " + + "by=\"a_s\", " + + "group(sort=\"a_i desc\", n=\"5\"))," + + "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_s desc\")"); - assert(tuples.size() == 3); + pstream.setStreamContext(streamContext); + tuples = getTuples(pstream); + + assert (tuples.size() == 3); - t0 = tuples.get(0); - maps0 = t0.getMaps("group"); - assertMaps(maps0, 6, 4); + t0 = tuples.get(0); + maps0 = t0.getMaps("group"); + assertMaps(maps0, 6, 4); - t1 = tuples.get(1); - maps1 = t1.getMaps("group"); - assertMaps(maps1, 8, 7, 5, 3); + t1 = tuples.get(1); + maps1 = t1.getMaps("group"); + assertMaps(maps1, 8, 7, 5, 3); - t2 = tuples.get(2); - maps2 = t2.getMaps("group"); - assertMaps(maps2, 9, 2, 1, 0); + t2 = tuples.get(2); + maps2 = t2.getMaps("group"); + assertMaps(maps2, 9, 2, 1, 0); + } finally { + solrClientCache.close(); + } } @@ -1784,17 +1978,24 @@ public void testParallelRankStream() throws Exception { .withFunctionName("group", ReducerStream.class) .withFunctionName("parallel", ParallelStream.class); - ParallelStream pstream = (ParallelStream)streamFactory.constructStream("parallel(" - + COLLECTIONORALIAS + ", " - + "top(" + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + + COLLECTIONORALIAS + ", " + + "top(" + "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), " + "n=\"11\", " - + "sort=\"a_i desc\"), workers=\"2\", zkHost=\""+zkHost+"\", sort=\"a_i desc\")"); - - List tuples = getTuples(pstream); + + "sort=\"a_i desc\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i desc\")"); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); - assert(tuples.size() == 10); - assertOrder(tuples, 10,9,8,7,6,5,4,3,2,0); + assert (tuples.size() == 10); + assertOrder(tuples, 10, 9, 8, 7, 6, 5, 4, 3, 2, 0); + } finally { + solrClientCache.close(); + } } @@ -1823,24 +2024,29 @@ public void testParallelMergeStream() throws Exception { .withFunctionName("merge", MergeStream.class) .withFunctionName("parallel", ParallelStream.class); - //Test ascending - ParallelStream pstream = (ParallelStream)streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", merge(search(" + COLLECTIONORALIAS + ", q=\"id:(4 1 8 7 9)\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), search(" + COLLECTIONORALIAS + ", q=\"id:(0 2 3 6)\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), on=\"a_i asc\"), workers=\"2\", zkHost=\""+zkHost+"\", sort=\"a_i asc\")"); - - List tuples = getTuples(pstream); - - - - assert(tuples.size() == 9); - assertOrder(tuples, 0, 1, 2, 3, 4, 7, 6, 8, 9); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + //Test ascending + ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", merge(search(" + COLLECTIONORALIAS + ", q=\"id:(4 1 8 7 9)\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), search(" + COLLECTIONORALIAS + ", q=\"id:(0 2 3 6)\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), on=\"a_i 
asc\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i asc\")"); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); - //Test descending + assert (tuples.size() == 9); + assertOrder(tuples, 0, 1, 2, 3, 4, 7, 6, 8, 9); - pstream = (ParallelStream)streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", merge(search(" + COLLECTIONORALIAS + ", q=\"id:(4 1 8 9)\", fl=\"id,a_s,a_i\", sort=\"a_i desc\", partitionKeys=\"a_i\"), search(" + COLLECTIONORALIAS + ", q=\"id:(0 2 3 6)\", fl=\"id,a_s,a_i\", sort=\"a_i desc\", partitionKeys=\"a_i\"), on=\"a_i desc\"), workers=\"2\", zkHost=\""+zkHost+"\", sort=\"a_i desc\")"); + //Test descending - tuples = getTuples(pstream); + pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", merge(search(" + COLLECTIONORALIAS + ", q=\"id:(4 1 8 9)\", fl=\"id,a_s,a_i\", sort=\"a_i desc\", partitionKeys=\"a_i\"), search(" + COLLECTIONORALIAS + ", q=\"id:(0 2 3 6)\", fl=\"id,a_s,a_i\", sort=\"a_i desc\", partitionKeys=\"a_i\"), on=\"a_i desc\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i desc\")"); + pstream.setStreamContext(streamContext); + tuples = getTuples(pstream); - assert(tuples.size() == 8); - assertOrder(tuples, 9, 8, 6, 4, 3, 2, 1, 0); + assert (tuples.size() == 8); + assertOrder(tuples, 9, 8, 6, 4, 3, 2, 1, 0); + } finally { + solrClientCache.close(); + } } @@ -1869,104 +2075,115 @@ public void testParallelRollupStream() throws Exception { .withFunctionName("min", MinMetric.class) .withFunctionName("max", MaxMetric.class) .withFunctionName("avg", MeanMetric.class) - .withFunctionName("count", CountMetric.class); - + .withFunctionName("count", CountMetric.class); + + + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamExpression expression; TupleStream stream; List tuples; - expression = StreamExpressionParser.parse("parallel(" + COLLECTIONORALIAS + "," - + "rollup(" - + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"a_s,a_i,a_f\", sort=\"a_s asc\", partitionKeys=\"a_s\")," - + "over=\"a_s\"," - + "sum(a_i)," - + "sum(a_f)," - + "min(a_i)," - + "min(a_f)," - + "max(a_i)," - + "max(a_f)," - + "avg(a_i)," - + "avg(a_f)," - + "count(*)" - + ")," - + "workers=\"2\", zkHost=\""+cluster.getZkServer().getZkAddress()+"\", sort=\"a_s asc\")" - ); - stream = factory.constructStream(expression); - tuples = getTuples(stream); - - assert(tuples.size() == 3); + try { + expression = StreamExpressionParser.parse("parallel(" + COLLECTIONORALIAS + "," + + "rollup(" + + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"a_s,a_i,a_f\", sort=\"a_s asc\", partitionKeys=\"a_s\")," + + "over=\"a_s\"," + + "sum(a_i)," + + "sum(a_f)," + + "min(a_i)," + + "min(a_f)," + + "max(a_i)," + + "max(a_f)," + + "avg(a_i)," + + "avg(a_f)," + + "count(*)" + + ")," + + "workers=\"2\", zkHost=\"" + cluster.getZkServer().getZkAddress() + "\", sort=\"a_s asc\")" + ); - //Test Long and Double Sums - Tuple tuple = tuples.get(0); - String bucket = tuple.getString("a_s"); - Double sumi = tuple.getDouble("sum(a_i)"); - Double sumf = tuple.getDouble("sum(a_f)"); - Double mini = tuple.getDouble("min(a_i)"); - Double minf = tuple.getDouble("min(a_f)"); - Double maxi = tuple.getDouble("max(a_i)"); - Double maxf = tuple.getDouble("max(a_f)"); - Double avgi = tuple.getDouble("avg(a_i)"); - Double avgf = tuple.getDouble("avg(a_f)"); - Double count = tuple.getDouble("count(*)"); + stream = 
factory.constructStream(expression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assertTrue(bucket.equals("hello0")); - assertTrue(sumi.doubleValue() == 17.0D); - assertTrue(sumf.doubleValue() == 18.0D); - assertTrue(mini.doubleValue() == 0.0D); - assertTrue(minf.doubleValue() == 1.0D); - assertTrue(maxi.doubleValue() == 14.0D); - assertTrue(maxf.doubleValue() == 10.0D); - assertTrue(avgi.doubleValue() == 4.25D); - assertTrue(avgf.doubleValue() == 4.5D); - assertTrue(count.doubleValue() == 4); + assert (tuples.size() == 3); - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); + //Test Long and Double Sums - assertTrue(bucket.equals("hello3")); - assertTrue(sumi.doubleValue() == 38.0D); - assertTrue(sumf.doubleValue() == 26.0D); - assertTrue(mini.doubleValue() == 3.0D); - assertTrue(minf.doubleValue() == 3.0D); - assertTrue(maxi.doubleValue() == 13.0D); - assertTrue(maxf.doubleValue() == 9.0D); - assertTrue(avgi.doubleValue() == 9.5D); - assertTrue(avgf.doubleValue() == 6.5D); - assertTrue(count.doubleValue() == 4); + Tuple tuple = tuples.get(0); + String bucket = tuple.getString("a_s"); + Double sumi = tuple.getDouble("sum(a_i)"); + Double sumf = tuple.getDouble("sum(a_f)"); + Double mini = tuple.getDouble("min(a_i)"); + Double minf = tuple.getDouble("min(a_f)"); + Double maxi = tuple.getDouble("max(a_i)"); + Double maxf = tuple.getDouble("max(a_f)"); + Double avgi = tuple.getDouble("avg(a_i)"); + Double avgf = tuple.getDouble("avg(a_f)"); + Double count = tuple.getDouble("count(*)"); - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); + assertTrue(bucket.equals("hello0")); + assertTrue(sumi.doubleValue() == 17.0D); + assertTrue(sumf.doubleValue() == 18.0D); + assertTrue(mini.doubleValue() == 0.0D); + assertTrue(minf.doubleValue() == 1.0D); + assertTrue(maxi.doubleValue() == 14.0D); + assertTrue(maxf.doubleValue() == 10.0D); + assertTrue(avgi.doubleValue() == 4.25D); + assertTrue(avgf.doubleValue() == 4.5D); + assertTrue(count.doubleValue() == 4); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); - assertTrue(bucket.equals("hello4")); - assertTrue(sumi.longValue() == 15); - assertTrue(sumf.doubleValue() == 11.0D); - assertTrue(mini.doubleValue() == 4.0D); - assertTrue(minf.doubleValue() == 4.0D); - assertTrue(maxi.doubleValue() == 11.0D); - assertTrue(maxf.doubleValue() == 7.0D); - assertTrue(avgi.doubleValue() == 7.5D); - assertTrue(avgf.doubleValue() == 5.5D); - assertTrue(count.doubleValue() == 2); + 
assertTrue(bucket.equals("hello3")); + assertTrue(sumi.doubleValue() == 38.0D); + assertTrue(sumf.doubleValue() == 26.0D); + assertTrue(mini.doubleValue() == 3.0D); + assertTrue(minf.doubleValue() == 3.0D); + assertTrue(maxi.doubleValue() == 13.0D); + assertTrue(maxf.doubleValue() == 9.0D); + assertTrue(avgi.doubleValue() == 9.5D); + assertTrue(avgf.doubleValue() == 6.5D); + assertTrue(count.doubleValue() == 4); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + assertTrue(bucket.equals("hello4")); + assertTrue(sumi.longValue() == 15); + assertTrue(sumf.doubleValue() == 11.0D); + assertTrue(mini.doubleValue() == 4.0D); + assertTrue(minf.doubleValue() == 4.0D); + assertTrue(maxi.doubleValue() == 11.0D); + assertTrue(maxf.doubleValue() == 7.0D); + assertTrue(avgi.doubleValue() == 7.5D); + assertTrue(avgf.doubleValue() == 5.5D); + assertTrue(count.doubleValue() == 2); + } finally { + solrClientCache.close(); + } } @Test @@ -1994,52 +2211,62 @@ public void testInnerJoinStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("innerJoin", InnerJoinStream.class); - - // Basic test - expression = StreamExpressionParser.parse("innerJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," - + "on=\"join1_i=join1_i, join2_s=join2_s\")"); - stream = new InnerJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 8); - assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7); - - // Basic desc - expression = StreamExpressionParser.parse("innerJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "on=\"join1_i=join1_i, join2_s=join2_s\")"); - stream = new InnerJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 8); - assertOrder(tuples, 7,3,4,5,1,1,15,15); - - // Results in both searches, no join matches - expression = StreamExpressionParser.parse("innerJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\")," - + "on=\"ident_s=right.ident_s\")"); - stream = new InnerJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 0); - - // Differing field names - expression = 
StreamExpressionParser.parse("innerJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\", aliases=\"join3_i=aliasesField\")," - + "on=\"join1_i=aliasesField, join2_s=join2_s\")"); - stream = new InnerJoinStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 8); - assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7); + try { + // Basic test + expression = StreamExpressionParser.parse("innerJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," + + "on=\"join1_i=join1_i, join2_s=join2_s\")"); + stream = new InnerJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 8); + assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7); + + // Basic desc + expression = StreamExpressionParser.parse("innerJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "on=\"join1_i=join1_i, join2_s=join2_s\")"); + stream = new InnerJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 8); + assertOrder(tuples, 7, 3, 4, 5, 1, 1, 15, 15); + + // Results in both searches, no join matches + expression = StreamExpressionParser.parse("innerJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\")," + + "on=\"ident_s=right.ident_s\")"); + stream = new InnerJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 0); + + // Differing field names + expression = StreamExpressionParser.parse("innerJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\", aliases=\"join3_i=aliasesField\")," + + "on=\"join1_i=aliasesField, join2_s=join2_s\")"); + stream = new InnerJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 8); + assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7); + } finally { + solrClientCache.close(); + } } @Test @@ -2067,6 +2294,9 @@ public void testLeftOuterJoinStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) @@ -2074,45 +2304,52 @@ public void 
testLeftOuterJoinStream() throws Exception { .withFunctionName("leftOuterJoin", LeftOuterJoinStream.class); // Basic test - expression = StreamExpressionParser.parse("leftOuterJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," - + "on=\"join1_i=join1_i, join2_s=join2_s\")"); - stream = new LeftOuterJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 10); - assertOrder(tuples, 1,1,15,15,2,3,4,5,6,7); - - // Basic desc - expression = StreamExpressionParser.parse("leftOuterJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "on=\"join1_i=join1_i, join2_s=join2_s\")"); - stream = new LeftOuterJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 10); - assertOrder(tuples, 7, 6, 3, 4, 5, 1, 1, 15, 15, 2); - - // Results in both searches, no join matches - expression = StreamExpressionParser.parse("leftOuterJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\")," - + "on=\"ident_s=right.ident_s\")"); - stream = new LeftOuterJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 8); - assertOrder(tuples, 1, 15, 2, 3, 4, 5, 6, 7); - - // Differing field names - expression = StreamExpressionParser.parse("leftOuterJoin(" - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," - + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\", aliases=\"join3_i=aliasesField\")," - + "on=\"join1_i=aliasesField, join2_s=join2_s\")"); - stream = new LeftOuterJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 10); - assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7); - + try { + expression = StreamExpressionParser.parse("leftOuterJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," + + "on=\"join1_i=join1_i, join2_s=join2_s\")"); + stream = new LeftOuterJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 10); + assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7); + + // Basic desc + expression = StreamExpressionParser.parse("leftOuterJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "on=\"join1_i=join1_i, join2_s=join2_s\")"); + stream = new LeftOuterJoinStream(expression, factory); + 
stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 10); + assertOrder(tuples, 7, 6, 3, 4, 5, 1, 1, 15, 15, 2); + + // Results in both searches, no join matches + expression = StreamExpressionParser.parse("leftOuterJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\")," + + "on=\"ident_s=right.ident_s\")"); + stream = new LeftOuterJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 8); + assertOrder(tuples, 1, 15, 2, 3, 4, 5, 6, 7); + + // Differing field names + expression = StreamExpressionParser.parse("leftOuterJoin(" + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," + + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\", aliases=\"join3_i=aliasesField\")," + + "on=\"join1_i=aliasesField, join2_s=join2_s\")"); + stream = new LeftOuterJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 10); + assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7); + } finally { + solrClientCache.close(); + } } @Test @@ -2140,53 +2377,62 @@ public void testHashJoinStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("hashJoin", HashJoinStream.class); - - // Basic test - expression = StreamExpressionParser.parse("hashJoin(" - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," - + "on=\"join1_i, join2_s\")"); - stream = new HashJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 8); - assertOrder(tuples, 1,1,15,15,3,4,5,7); - - // Basic desc - expression = StreamExpressionParser.parse("hashJoin(" - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "on=\"join1_i, join2_s\")"); - stream = new HashJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 8); - assertOrder(tuples, 7,3,4,5,1,1,15,15); - - // Results in both searches, no join matches - expression = StreamExpressionParser.parse("hashJoin(" - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," - + "on=\"ident_s\")"); - stream = new HashJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 0); - - // 
Basic test with "on" mapping - expression = StreamExpressionParser.parse("hashJoin(" - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join3_i,ident_s\", sort=\"join1_i asc, join3_i asc, id asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join3_i,ident_s\", sort=\"join1_i asc, join3_i asc\")," - + "on=\"join1_i=join3_i\")"); - stream = new HashJoinStream(expression, factory); - tuples = getTuples(stream); - assertEquals(17, tuples.size()); + try { + // Basic test + expression = StreamExpressionParser.parse("hashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," + + "on=\"join1_i, join2_s\")"); + stream = new HashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 8); + assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7); + + // Basic desc + expression = StreamExpressionParser.parse("hashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "on=\"join1_i, join2_s\")"); + stream = new HashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 8); + assertOrder(tuples, 7, 3, 4, 5, 1, 1, 15, 15); + + // Results in both searches, no join matches + expression = StreamExpressionParser.parse("hashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," + + "on=\"ident_s\")"); + stream = new HashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 0); - //Does a lexical sort - assertOrder(tuples, 1, 1, 15, 15, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 7); + // Basic test with "on" mapping + expression = StreamExpressionParser.parse("hashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join3_i,ident_s\", sort=\"join1_i asc, join3_i asc, id asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join3_i,ident_s\", sort=\"join1_i asc, join3_i asc\")," + + "on=\"join1_i=join3_i\")"); + stream = new HashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(17, tuples.size()); + //Does a lexical sort + assertOrder(tuples, 1, 1, 15, 15, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 7); + } finally { + solrClientCache.close(); + } } @Test @@ -2214,56 +2460,66 @@ public void testOuterHashJoinStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("outerHashJoin", OuterHashJoinStream.class); - - // Basic test - expression = StreamExpressionParser.parse("outerHashJoin(" - + "search(collection1, q=\"side_s:left\", 
fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," - + "on=\"join1_i, join2_s\")"); - stream = new OuterHashJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 10); - assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7); - - // Basic desc - expression = StreamExpressionParser.parse("outerHashJoin(" - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," - + "on=\"join1_i, join2_s\")"); - stream = new OuterHashJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 10); - assertOrder(tuples, 7,6,3,4,5,1,1,15,15,2); - - // Results in both searches, no join matches - expression = StreamExpressionParser.parse("outerHashJoin(" - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," - + "on=\"ident_s\")"); - stream = new OuterHashJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 8); - assertOrder(tuples, 1,15,2,3,4,5,6,7); - - // Basic test - expression = StreamExpressionParser.parse("outerHashJoin(" - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," - + "hashed=search(collection1, q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join2_s asc\")," - + "on=\"join1_i=join3_i, join2_s\")"); - stream = new OuterHashJoinStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 10); - assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7); - } - - @Test - public void testSelectStream() throws Exception { - + try { + // Basic test + expression = StreamExpressionParser.parse("outerHashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\")," + + "on=\"join1_i, join2_s\")"); + stream = new OuterHashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 10); + assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7); + + // Basic desc + expression = StreamExpressionParser.parse("outerHashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\")," + + "on=\"join1_i, join2_s\")"); + stream = new OuterHashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 10); + assertOrder(tuples, 7, 6, 3, 4, 5, 1, 1, 15, 15, 2); + + // Results in both searches, no join matches + expression = StreamExpressionParser.parse("outerHashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\")," + + "on=\"ident_s\")"); + stream = new 
OuterHashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 8); + assertOrder(tuples, 1, 15, 2, 3, 4, 5, 6, 7); + + // Basic test + expression = StreamExpressionParser.parse("outerHashJoin(" + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")," + + "hashed=search(collection1, q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join2_s asc\")," + + "on=\"join1_i=join3_i, join2_s\")"); + stream = new OuterHashJoinStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assert (tuples.size() == 10); + assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7); + } finally { + solrClientCache.close(); + } + } + + @Test + public void testSelectStream() throws Exception { + new UpdateRequest() .add(id, "1", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9 .add(id, "15", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9 @@ -2286,7 +2542,11 @@ public void testSelectStream() throws Exception { String clause; TupleStream stream; List tuples; - + + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) @@ -2298,92 +2558,101 @@ public void testSelectStream() throws Exception { .withFunctionName("if", IfThenElseEvaluator.class) .withFunctionName("gt", GreaterThanEvaluator.class) ; - - // Basic test - clause = "select(" - + "id, join1_i as join1, join2_s as join2, ident_s as identity," - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" - + ")"; - stream = factory.constructStream(clause); - tuples = getTuples(stream); - assertFields(tuples, "id", "join1", "join2", "identity"); - assertNotFields(tuples, "join1_i", "join2_s", "ident_s"); - - // Basic with replacements test - clause = "select(" - + "id, join1_i as join1, join2_s as join2, ident_s as identity," - + "replace(join1, 0, withValue=12), replace(join1, 3, withValue=12), replace(join1, 2, withField=join2)," - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" - + ")"; - stream = factory.constructStream(clause); - tuples = getTuples(stream); - assertFields(tuples, "id", "join1", "join2", "identity"); - assertNotFields(tuples, "join1_i", "join2_s", "ident_s"); - assertLong(tuples.get(0), "join1", 12); - assertLong(tuples.get(1), "join1", 12); - assertLong(tuples.get(2), "join1", 12); - assertLong(tuples.get(7), "join1", 12); - assertString(tuples.get(6), "join1", "d"); - - // Basic with replacements and concat test - clause = "select(" - + "id, join1_i as join1, join2_s as join2, ident_s as identity," - + "replace(join1, 0, withValue=12), replace(join1, 3, withValue=12), replace(join1, 2, withField=join2)," - + "concat(fields=\"identity,join1\", as=\"newIdentity\",delim=\"-\")," - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" - + ")"; - stream = factory.constructStream(clause); - tuples = getTuples(stream); - assertFields(tuples, "id", "join1", "join2", "identity", "newIdentity"); - assertNotFields(tuples, 
"join1_i", "join2_s", "ident_s"); - assertLong(tuples.get(0), "join1", 12); - assertString(tuples.get(0), "newIdentity", "left_1-12"); - assertLong(tuples.get(1), "join1", 12); - assertString(tuples.get(1), "newIdentity", "left_1-12"); - assertLong(tuples.get(2), "join1", 12); - assertString(tuples.get(2), "newIdentity", "left_2-12"); - assertLong(tuples.get(7), "join1", 12); - assertString(tuples.get(7), "newIdentity", "left_7-12"); - assertString(tuples.get(6), "join1", "d"); - assertString(tuples.get(6), "newIdentity", "left_6-d"); - - // Inner stream test - clause = "innerJoin(" - + "select(" - + "id, join1_i as left.join1, join2_s as left.join2, ident_s as left.ident," - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" - + ")," - + "select(" - + "join3_i as right.join1, join2_s as right.join2, ident_s as right.ident," - + "search(collection1, q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\")," - + ")," - + "on=\"left.join1=right.join1, left.join2=right.join2\"" - + ")"; - stream = factory.constructStream(clause); - tuples = getTuples(stream); - assertFields(tuples, "id", "left.join1", "left.join2", "left.ident", "right.join1", "right.join2", "right.ident"); - - // Wrapped select test - clause = "select(" - + "id, left.ident, right.ident," - + "innerJoin(" - + "select(" - + "id, join1_i as left.join1, join2_s as left.join2, ident_s as left.ident," - + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" - + ")," - + "select(" - + "join3_i as right.join1, join2_s as right.join2, ident_s as right.ident," - + "search(collection1, q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\")," - + ")," - + "on=\"left.join1=right.join1, left.join2=right.join2\"" - + ")" - + ")"; - stream = factory.constructStream(clause); - tuples = getTuples(stream); - assertFields(tuples, "id", "left.ident", "right.ident"); - assertNotFields(tuples, "left.join1", "left.join2", "right.join1", "right.join2"); + try { + // Basic test + clause = "select(" + + "id, join1_i as join1, join2_s as join2, ident_s as identity," + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" + + ")"; + stream = factory.constructStream(clause); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertFields(tuples, "id", "join1", "join2", "identity"); + assertNotFields(tuples, "join1_i", "join2_s", "ident_s"); + + // Basic with replacements test + clause = "select(" + + "id, join1_i as join1, join2_s as join2, ident_s as identity," + + "replace(join1, 0, withValue=12), replace(join1, 3, withValue=12), replace(join1, 2, withField=join2)," + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" + + ")"; + stream = factory.constructStream(clause); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertFields(tuples, "id", "join1", "join2", "identity"); + assertNotFields(tuples, "join1_i", "join2_s", "ident_s"); + assertLong(tuples.get(0), "join1", 12); + assertLong(tuples.get(1), "join1", 12); + assertLong(tuples.get(2), "join1", 12); + assertLong(tuples.get(7), "join1", 12); + assertString(tuples.get(6), "join1", "d"); + + + // Basic with replacements and concat test + clause = "select(" + + "id, join1_i as join1, join2_s as join2, ident_s as identity," 
+ + "replace(join1, 0, withValue=12), replace(join1, 3, withValue=12), replace(join1, 2, withField=join2)," + + "concat(fields=\"identity,join1\", as=\"newIdentity\",delim=\"-\")," + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" + + ")"; + stream = factory.constructStream(clause); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertFields(tuples, "id", "join1", "join2", "identity", "newIdentity"); + assertNotFields(tuples, "join1_i", "join2_s", "ident_s"); + assertLong(tuples.get(0), "join1", 12); + assertString(tuples.get(0), "newIdentity", "left_1-12"); + assertLong(tuples.get(1), "join1", 12); + assertString(tuples.get(1), "newIdentity", "left_1-12"); + assertLong(tuples.get(2), "join1", 12); + assertString(tuples.get(2), "newIdentity", "left_2-12"); + assertLong(tuples.get(7), "join1", 12); + assertString(tuples.get(7), "newIdentity", "left_7-12"); + assertString(tuples.get(6), "join1", "d"); + assertString(tuples.get(6), "newIdentity", "left_6-d"); + + // Inner stream test + clause = "innerJoin(" + + "select(" + + "id, join1_i as left.join1, join2_s as left.join2, ident_s as left.ident," + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" + + ")," + + "select(" + + "join3_i as right.join1, join2_s as right.join2, ident_s as right.ident," + + "search(collection1, q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\")," + + ")," + + "on=\"left.join1=right.join1, left.join2=right.join2\"" + + ")"; + stream = factory.constructStream(clause); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertFields(tuples, "id", "left.join1", "left.join2", "left.ident", "right.join1", "right.join2", "right.ident"); + + // Wrapped select test + clause = "select(" + + "id, left.ident, right.ident," + + "innerJoin(" + + "select(" + + "id, join1_i as left.join1, join2_s as left.join2, ident_s as left.ident," + + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\")" + + ")," + + "select(" + + "join3_i as right.join1, join2_s as right.join2, ident_s as right.ident," + + "search(collection1, q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\")," + + ")," + + "on=\"left.join1=right.join1, left.join2=right.join2\"" + + ")" + + ")"; + stream = factory.constructStream(clause); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertFields(tuples, "id", "left.ident", "right.ident"); + assertNotFields(tuples, "left.join1", "left.join2", "right.join1", "right.join2"); + } finally { + solrClientCache.close(); + } } @Test @@ -3326,8 +3595,8 @@ public void testParallelTopicStream() throws Exception { .add(id, "3", "a_s", "hello", "a_i", "3", "a_f", "3", "subject", "ha ha bla blah3") .add(id, "4", "a_s", "hello", "a_i", "4", "a_f", "4", "subject", "ha ha bla blah4") .add(id, "1", "a_s", "hello", "a_i", "1", "a_f", "5", "subject", "ha ha bla blah5") - .add(id, "5", "a_s", "hello", "a_i", "10", "a_f", "6","subject", "ha ha bla blah6") - .add(id, "6", "a_s", "hello", "a_i", "11", "a_f", "7","subject", "ha ha bla blah7") + .add(id, "5", "a_s", "hello", "a_i", "10", "a_f", "6", "subject", "ha ha bla blah6") + .add(id, "6", "a_s", "hello", "a_i", "11", "a_f", "7", "subject", "ha ha bla blah7") .add(id, "7", "a_s", "hello", "a_i", "12", "a_f", "8", "subject", "ha ha bla blah8") 
.add(id, "8", "a_s", "hello", "a_i", "13", "a_f", "9", "subject", "ha ha bla blah9") .add(id, "9", "a_s", "hello", "a_i", "14", "a_f", "10", "subject", "ha ha bla blah10") @@ -3425,7 +3694,7 @@ public void testParallelTopicStream() throws Exception { context = new StreamContext(); context.setSolrClientCache(cache); stream.setStreamContext(context); - assertTopicRun(stream, "0","1","2","3","4","5","6","7","8","9","10","11"); + assertTopicRun(stream, "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"); //Add more documents //Index a few more documents @@ -3440,7 +3709,7 @@ public void testParallelTopicStream() throws Exception { context = new StreamContext(); context.setSolrClientCache(cache); stream.setStreamContext(context); - assertTopicRun(stream, "12","13"); + assertTopicRun(stream, "12", "13"); //Test text extraction @@ -3487,7 +3756,7 @@ public void testUpdateStream() throws Exception { false, true, TIMEOUT); new UpdateRequest() - .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") + .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0", "s_multi", "aaaa1", "s_multi", "bbbb1", "i_multi", "44", "i_multi", "77") .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "s_multi", "aaaa2", "s_multi", "bbbb2", "i_multi", "444", "i_multi", "777") .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "s_multi", "aaaa3", "s_multi", "bbbb3", "i_multi", "4444", "i_multi", "7777") @@ -3497,72 +3766,80 @@ public void testUpdateStream() throws Exception { StreamExpression expression; TupleStream stream; Tuple t; - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) .withCollectionZkHost("destinationCollection", cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("update", UpdateStream.class); - - //Copy all docs to destinationCollection - expression = StreamExpressionParser.parse("update(destinationCollection, batchSize=5, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\"))"); - stream = new UpdateStream(expression, factory); - List tuples = getTuples(stream); - cluster.getSolrClient().commit("destinationCollection"); - - //Ensure that all UpdateStream tuples indicate the correct number of copied/indexed docs - assert(tuples.size() == 1); - t = tuples.get(0); - assert(t.EOF == false); - assertEquals(5, t.get("batchIndexed")); - - //Ensure that destinationCollection actually has the new docs. 
- expression = StreamExpressionParser.parse("search(destinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - Tuple tuple = tuples.get(0); - assert(tuple.getLong("id") == 0); - assert(tuple.get("a_s").equals("hello0")); - assert(tuple.getLong("a_i") == 0); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); - - tuple = tuples.get(1); - assert(tuple.getLong("id") == 1); - assert(tuple.get("a_s").equals("hello1")); - assert(tuple.getLong("a_i") == 1); - assert(tuple.getDouble("a_f") == 1.0); - assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); - - tuple = tuples.get(2); - assert(tuple.getLong("id") == 2); - assert(tuple.get("a_s").equals("hello2")); - assert(tuple.getLong("a_i") == 2); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + try { + //Copy all docs to destinationCollection + expression = StreamExpressionParser.parse("update(destinationCollection, batchSize=5, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\"))"); + stream = new UpdateStream(expression, factory); + stream.setStreamContext(streamContext); + List tuples = getTuples(stream); + cluster.getSolrClient().commit("destinationCollection"); - tuple = tuples.get(3); - assert(tuple.getLong("id") == 3); - assert(tuple.get("a_s").equals("hello3")); - assert(tuple.getLong("a_i") == 3); - assert(tuple.getDouble("a_f") == 3.0); - assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + //Ensure that all UpdateStream tuples indicate the correct number of copied/indexed docs + assert (tuples.size() == 1); + t = tuples.get(0); + assert (t.EOF == false); + assertEquals(5, t.get("batchIndexed")); - tuple = tuples.get(4); - assert(tuple.getLong("id") == 4); - assert(tuple.get("a_s").equals("hello4")); - assert(tuple.getLong("a_i") == 4); - assert(tuple.getDouble("a_f") == 4.0); - assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); - - CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); + //Ensure that destinationCollection actually has the new docs. 
+ expression = StreamExpressionParser.parse("search(destinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + + Tuple tuple = tuples.get(0); + assert (tuple.getLong("id") == 0); + assert (tuple.get("a_s").equals("hello0")); + assert (tuple.getLong("a_i") == 0); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + + tuple = tuples.get(1); + assert (tuple.getLong("id") == 1); + assert (tuple.get("a_s").equals("hello1")); + assert (tuple.getLong("a_i") == 1); + assert (tuple.getDouble("a_f") == 1.0); + assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + + tuple = tuples.get(2); + assert (tuple.getLong("id") == 2); + assert (tuple.get("a_s").equals("hello2")); + assert (tuple.getLong("a_i") == 2); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + + tuple = tuples.get(3); + assert (tuple.getLong("id") == 3); + assert (tuple.get("a_s").equals("hello3")); + assert (tuple.getLong("a_i") == 3); + assert (tuple.getDouble("a_f") == 3.0); + assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + + tuple = tuples.get(4); + assert (tuple.getLong("id") == 4); + assert (tuple.get("a_s").equals("hello4")); + assert (tuple.getLong("a_i") == 4); + assert (tuple.getDouble("a_f") == 4.0); + assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); + } finally { + CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); + solrClientCache.close(); + } } @Test @@ -3573,7 +3850,7 @@ public void testParallelUpdateStream() throws Exception { false, true, TIMEOUT); new UpdateRequest() - .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") + .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0", "s_multi", "aaaa1", "s_multi", "bbbb1", "i_multi", "44", "i_multi", "77") .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "s_multi", "aaaa2", "s_multi", "bbbb2", "i_multi", "444", "i_multi", "777") .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "s_multi", "aaaa3", "s_multi", "bbbb3", "i_multi", "4444", "i_multi", "7777") @@ -3583,6 +3860,9 @@ public void testParallelUpdateStream() throws Exception { StreamExpression expression; TupleStream stream; Tuple t; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); String zkHost = cluster.getZkServer().getZkAddress(); StreamFactory factory = new StreamFactory() @@ -3592,68 +3872,73 @@ public void testParallelUpdateStream() throws Exception { .withFunctionName("update", UpdateStream.class) .withFunctionName("parallel", ParallelStream.class); - //Copy all docs to destinationCollection - String 
updateExpression = "update(parallelDestinationCollection, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"))"; - TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\""+zkHost+"\", sort=\"batchNumber asc\")"); - List tuples = getTuples(parallelUpdateStream); - cluster.getSolrClient().commit("parallelDestinationCollection"); - - //Ensure that all UpdateStream tuples indicate the correct number of copied/indexed docs - long count = 0; - - for(Tuple tuple : tuples) { - count+=tuple.getLong("batchIndexed"); - } - - assert(count == 5); - - //Ensure that destinationCollection actually has the new docs. - expression = StreamExpressionParser.parse("search(parallelDestinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - - Tuple tuple = tuples.get(0); - assert(tuple.getLong("id") == 0); - assert(tuple.get("a_s").equals("hello0")); - assert(tuple.getLong("a_i") == 0); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); - - tuple = tuples.get(1); - assert(tuple.getLong("id") == 1); - assert(tuple.get("a_s").equals("hello1")); - assert(tuple.getLong("a_i") == 1); - assert(tuple.getDouble("a_f") == 1.0); - assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); - - tuple = tuples.get(2); - assert(tuple.getLong("id") == 2); - assert(tuple.get("a_s").equals("hello2")); - assert(tuple.getLong("a_i") == 2); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + try { + //Copy all docs to destinationCollection + String updateExpression = "update(parallelDestinationCollection, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"))"; + TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"batchNumber asc\")"); + parallelUpdateStream.setStreamContext(streamContext); + List tuples = getTuples(parallelUpdateStream); + cluster.getSolrClient().commit("parallelDestinationCollection"); + + //Ensure that all UpdateStream tuples indicate the correct number of copied/indexed docs + long count = 0; + + for (Tuple tuple : tuples) { + count += tuple.getLong("batchIndexed"); + } - tuple = tuples.get(3); - assert(tuple.getLong("id") == 3); - assert(tuple.get("a_s").equals("hello3")); - assert(tuple.getLong("a_i") == 3); - assert(tuple.getDouble("a_f") == 3.0); - assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + assert (count == 5); - tuple = tuples.get(4); - assert(tuple.getLong("id") == 4); - assert(tuple.get("a_s").equals("hello4")); - assert(tuple.getLong("a_i") == 4); - assert(tuple.getDouble("a_f") == 4.0); - assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); - - 
CollectionAdminRequest.deleteCollection("parallelDestinationCollection").process(cluster.getSolrClient()); + //Ensure that destinationCollection actually has the new docs. + expression = StreamExpressionParser.parse("search(parallelDestinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + + Tuple tuple = tuples.get(0); + assert (tuple.getLong("id") == 0); + assert (tuple.get("a_s").equals("hello0")); + assert (tuple.getLong("a_i") == 0); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + + tuple = tuples.get(1); + assert (tuple.getLong("id") == 1); + assert (tuple.get("a_s").equals("hello1")); + assert (tuple.getLong("a_i") == 1); + assert (tuple.getDouble("a_f") == 1.0); + assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + + tuple = tuples.get(2); + assert (tuple.getLong("id") == 2); + assert (tuple.get("a_s").equals("hello2")); + assert (tuple.getLong("a_i") == 2); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + + tuple = tuples.get(3); + assert (tuple.getLong("id") == 3); + assert (tuple.get("a_s").equals("hello3")); + assert (tuple.getLong("a_i") == 3); + assert (tuple.getDouble("a_f") == 3.0); + assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + + tuple = tuples.get(4); + assert (tuple.getLong("id") == 4); + assert (tuple.get("a_s").equals("hello4")); + assert (tuple.getLong("a_i") == 4); + assert (tuple.getDouble("a_f") == 4.0); + assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); + } finally { + CollectionAdminRequest.deleteCollection("parallelDestinationCollection").process(cluster.getSolrClient()); + solrClientCache.close(); + } } @Test @@ -3674,6 +3959,9 @@ public void testParallelDaemonUpdateStream() throws Exception { StreamExpression expression; TupleStream stream; Tuple t; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); String zkHost = cluster.getZkServer().getZkAddress(); StreamFactory factory = new StreamFactory() @@ -3684,139 +3972,147 @@ public void testParallelDaemonUpdateStream() throws Exception { .withFunctionName("parallel", ParallelStream.class) .withFunctionName("daemon", DaemonStream.class); - //Copy all docs to destinationCollection - String updateExpression = "daemon(update(parallelDestinationCollection1, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\")), runInterval=\"1000\", id=\"test\")"; - TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\""+zkHost+"\", sort=\"batchNumber asc\")"); - List tuples = getTuples(parallelUpdateStream); - assert(tuples.size() == 2); - - //Lets sleep long enough for daemon updates to run. 
- //Lets stop the daemons - ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream", "action", "list")); + try { + //Copy all docs to destinationCollection + String updateExpression = "daemon(update(parallelDestinationCollection1, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\")), runInterval=\"1000\", id=\"test\")"; + TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"batchNumber asc\")"); + parallelUpdateStream.setStreamContext(streamContext); + List tuples = getTuples(parallelUpdateStream); + assert (tuples.size() == 2); - int workersComplete = 0; - for(JettySolrRunner jetty : cluster.getJettySolrRunners()) { - int iterations = 0; - INNER: - while(iterations == 0) { - SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams); - solrStream.open(); - Tuple tupleResponse = solrStream.read(); - if (tupleResponse.EOF) { - solrStream.close(); - break INNER; - } else { - long l = tupleResponse.getLong("iterations"); - if(l > 0) { - ++workersComplete; + //Lets sleep long enough for daemon updates to run. + //Lets stop the daemons + ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream", "action", "list")); + + int workersComplete = 0; + for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { + int iterations = 0; + INNER: + while (iterations == 0) { + SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams); + solrStream.setStreamContext(streamContext); + solrStream.open(); + Tuple tupleResponse = solrStream.read(); + if (tupleResponse.EOF) { + solrStream.close(); + break INNER; } else { - try { - Thread.sleep(1000); - } catch(Exception e) { - + long l = tupleResponse.getLong("iterations"); + if (l > 0) { + ++workersComplete; + } else { + try { + Thread.sleep(1000); + } catch (Exception e) { + + } } + iterations = (int) l; + solrStream.close(); } - iterations = (int) l; - solrStream.close(); } } - } - assertEquals(cluster.getJettySolrRunners().size(), workersComplete); + assertEquals(cluster.getJettySolrRunners().size(), workersComplete); - cluster.getSolrClient().commit("parallelDestinationCollection1"); + cluster.getSolrClient().commit("parallelDestinationCollection1"); - //Lets stop the daemons - sParams = new ModifiableSolrParams(); - sParams.set(CommonParams.QT, "/stream"); - sParams.set("action", "stop"); - sParams.set("id", "test"); - for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { - SolrStream solrStream = new SolrStream(jetty.getBaseUrl() + "/collection1", sParams); - solrStream.open(); - Tuple tupleResponse = solrStream.read(); - solrStream.close(); - } - - sParams = new ModifiableSolrParams(); - sParams.set(CommonParams.QT, "/stream"); - sParams.set("action", "list"); - - workersComplete = 0; - for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { - long stopTime = 0; - INNER: - while(stopTime == 0) { + //Lets stop the daemons + sParams = new ModifiableSolrParams(); + sParams.set(CommonParams.QT, "/stream"); + sParams.set("action", "stop"); + sParams.set("id", "test"); + for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { SolrStream solrStream = new SolrStream(jetty.getBaseUrl() + "/collection1", sParams); + solrStream.setStreamContext(streamContext); solrStream.open(); Tuple 
tupleResponse = solrStream.read(); - if (tupleResponse.EOF) { - solrStream.close(); - break INNER; - } else { - stopTime = tupleResponse.getLong("stopTime"); - if (stopTime > 0) { - ++workersComplete; - } else { - try { - Thread.sleep(1000); - } catch(Exception e) { + solrStream.close(); + } + sParams = new ModifiableSolrParams(); + sParams.set(CommonParams.QT, "/stream"); + sParams.set("action", "list"); + + workersComplete = 0; + for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { + long stopTime = 0; + INNER: + while (stopTime == 0) { + SolrStream solrStream = new SolrStream(jetty.getBaseUrl() + "/collection1", sParams); + solrStream.setStreamContext(streamContext); + solrStream.open(); + Tuple tupleResponse = solrStream.read(); + if (tupleResponse.EOF) { + solrStream.close(); + break INNER; + } else { + stopTime = tupleResponse.getLong("stopTime"); + if (stopTime > 0) { + ++workersComplete; + } else { + try { + Thread.sleep(1000); + } catch (Exception e) { + + } } + solrStream.close(); } - solrStream.close(); } } - } - - assertEquals(cluster.getJettySolrRunners().size(), workersComplete); - //Ensure that destinationCollection actually has the new docs. - expression = StreamExpressionParser.parse("search(parallelDestinationCollection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - - Tuple tuple = tuples.get(0); - assert(tuple.getLong("id") == 0); - assert(tuple.get("a_s").equals("hello0")); - assert(tuple.getLong("a_i") == 0); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); - - tuple = tuples.get(1); - assert(tuple.getLong("id") == 1); - assert(tuple.get("a_s").equals("hello1")); - assert(tuple.getLong("a_i") == 1); - assert(tuple.getDouble("a_f") == 1.0); - assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); - - tuple = tuples.get(2); - assert(tuple.getLong("id") == 2); - assert(tuple.get("a_s").equals("hello2")); - assert(tuple.getLong("a_i") == 2); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); - - tuple = tuples.get(3); - assert(tuple.getLong("id") == 3); - assert(tuple.get("a_s").equals("hello3")); - assert(tuple.getLong("a_i") == 3); - assert(tuple.getDouble("a_f") == 3.0); - assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); - tuple = tuples.get(4); - assert(tuple.getLong("id") == 4); - assert(tuple.get("a_s").equals("hello4")); - assert(tuple.getLong("a_i") == 4); - assert(tuple.getDouble("a_f") == 4.0); - assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); - - CollectionAdminRequest.deleteCollection("parallelDestinationCollection1").process(cluster.getSolrClient()); + assertEquals(cluster.getJettySolrRunners().size(), workersComplete); + //Ensure that destinationCollection actually has the new docs. 
+ expression = StreamExpressionParser.parse("search(parallelDestinationCollection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + + Tuple tuple = tuples.get(0); + assert (tuple.getLong("id") == 0); + assert (tuple.get("a_s").equals("hello0")); + assert (tuple.getLong("a_i") == 0); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + + tuple = tuples.get(1); + assert (tuple.getLong("id") == 1); + assert (tuple.get("a_s").equals("hello1")); + assert (tuple.getLong("a_i") == 1); + assert (tuple.getDouble("a_f") == 1.0); + assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + + tuple = tuples.get(2); + assert (tuple.getLong("id") == 2); + assert (tuple.get("a_s").equals("hello2")); + assert (tuple.getLong("a_i") == 2); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + + tuple = tuples.get(3); + assert (tuple.getLong("id") == 3); + assert (tuple.get("a_s").equals("hello3")); + assert (tuple.getLong("a_i") == 3); + assert (tuple.getDouble("a_f") == 3.0); + assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + + tuple = tuples.get(4); + assert (tuple.getLong("id") == 4); + assert (tuple.get("a_s").equals("hello4")); + assert (tuple.getLong("a_i") == 4); + assert (tuple.getDouble("a_f") == 4.0); + assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); + } finally { + CollectionAdminRequest.deleteCollection("parallelDestinationCollection1").process(cluster.getSolrClient()); + solrClientCache.close(); + } } @@ -3829,7 +4125,7 @@ public void testParallelTerminatingDaemonUpdateStream() throws Exception { false, true, TIMEOUT); new UpdateRequest() - .add(id, "0", "a_s", "hello", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") + .add(id, "0", "a_s", "hello", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") .add(id, "2", "a_s", "hello", "a_i", "2", "a_f", "0", "s_multi", "aaaa1", "s_multi", "bbbb1", "i_multi", "44", "i_multi", "77") .add(id, "3", "a_s", "hello", "a_i", "3", "a_f", "3", "s_multi", "aaaa2", "s_multi", "bbbb2", "i_multi", "444", "i_multi", "777") .add(id, "4", "a_s", "hello", "a_i", "4", "a_f", "4", "s_multi", "aaaa3", "s_multi", "bbbb3", "i_multi", "4444", "i_multi", "7777") @@ -3839,6 +4135,9 @@ public void testParallelTerminatingDaemonUpdateStream() throws Exception { StreamExpression expression; TupleStream stream; Tuple t; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); String zkHost = cluster.getZkServer().getZkAddress(); StreamFactory factory = new StreamFactory() @@ -3849,87 +4148,93 @@ public void testParallelTerminatingDaemonUpdateStream() throws Exception { .withFunctionName("parallel", ParallelStream.class) .withFunctionName("daemon", 
DaemonStream.class); - //Copy all docs to destinationCollection - String updateExpression = "daemon(update(parallelDestinationCollection1, batchSize=2, topic(collection1, collection1, q=\"a_s:hello\", fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", partitionKeys=\"a_f\", initialCheckpoint=0, id=\"topic1\")), terminate=true, runInterval=\"1000\", id=\"test\")"; - TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\""+zkHost+"\", sort=\"batchNumber asc\")"); - List tuples = getTuples(parallelUpdateStream); - assert(tuples.size() == 2); + try { + //Copy all docs to destinationCollection + String updateExpression = "daemon(update(parallelDestinationCollection1, batchSize=2, topic(collection1, collection1, q=\"a_s:hello\", fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", partitionKeys=\"a_f\", initialCheckpoint=0, id=\"topic1\")), terminate=true, runInterval=\"1000\", id=\"test\")"; + TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"batchNumber asc\")"); + parallelUpdateStream.setStreamContext(streamContext); + List tuples = getTuples(parallelUpdateStream); + assert (tuples.size() == 2); - ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream", "action", "list")); + ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream", "action", "list")); - int workersComplete = 0; + int workersComplete = 0; - //Daemons should terminate after the topic is completed - //Loop through all shards and wait for the daemons to be gone from the listing. - for(JettySolrRunner jetty : cluster.getJettySolrRunners()) { - INNER: - while(true) { - SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams); - solrStream.open(); - Tuple tupleResponse = solrStream.read(); - if (tupleResponse.EOF) { - solrStream.close(); - ++workersComplete; - break INNER; - } else { - solrStream.close(); - Thread.sleep(1000); + //Daemons should terminate after the topic is completed + //Loop through all shards and wait for the daemons to be gone from the listing. + for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { + INNER: + while (true) { + SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams); + solrStream.setStreamContext(streamContext); + solrStream.open(); + Tuple tupleResponse = solrStream.read(); + if (tupleResponse.EOF) { + solrStream.close(); + ++workersComplete; + break INNER; + } else { + solrStream.close(); + Thread.sleep(1000); + } } } - } - - assertEquals(cluster.getJettySolrRunners().size(), workersComplete); - - cluster.getSolrClient().commit("parallelDestinationCollection1"); - - //Ensure that destinationCollection actually has the new docs. 
- expression = StreamExpressionParser.parse("search(parallelDestinationCollection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - - Tuple tuple = tuples.get(0); - assert(tuple.getLong("id") == 0); - assert(tuple.get("a_s").equals("hello")); - assert(tuple.getLong("a_i") == 0); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); - tuple = tuples.get(1); - assert(tuple.getLong("id") == 1); - assert(tuple.get("a_s").equals("hello")); - assert(tuple.getLong("a_i") == 1); - assert(tuple.getDouble("a_f") == 1.0); - assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); - - tuple = tuples.get(2); - assert(tuple.getLong("id") == 2); - assert(tuple.get("a_s").equals("hello")); - assert(tuple.getLong("a_i") == 2); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + assertEquals(cluster.getJettySolrRunners().size(), workersComplete); - tuple = tuples.get(3); - assert(tuple.getLong("id") == 3); - assert(tuple.get("a_s").equals("hello")); - assert(tuple.getLong("a_i") == 3); - assert(tuple.getDouble("a_f") == 3.0); - assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + cluster.getSolrClient().commit("parallelDestinationCollection1"); - tuple = tuples.get(4); - assert(tuple.getLong("id") == 4); - assert(tuple.get("a_s").equals("hello")); - assert(tuple.getLong("a_i") == 4); - assert(tuple.getDouble("a_f") == 4.0); - assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); - - CollectionAdminRequest.deleteCollection("parallelDestinationCollection1").process(cluster.getSolrClient()); + //Ensure that destinationCollection actually has the new docs. 
+ expression = StreamExpressionParser.parse("search(parallelDestinationCollection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + + Tuple tuple = tuples.get(0); + assert (tuple.getLong("id") == 0); + assert (tuple.get("a_s").equals("hello")); + assert (tuple.getLong("a_i") == 0); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + + tuple = tuples.get(1); + assert (tuple.getLong("id") == 1); + assert (tuple.get("a_s").equals("hello")); + assert (tuple.getLong("a_i") == 1); + assert (tuple.getDouble("a_f") == 1.0); + assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + + tuple = tuples.get(2); + assert (tuple.getLong("id") == 2); + assert (tuple.get("a_s").equals("hello")); + assert (tuple.getLong("a_i") == 2); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + + tuple = tuples.get(3); + assert (tuple.getLong("id") == 3); + assert (tuple.get("a_s").equals("hello")); + assert (tuple.getLong("a_i") == 3); + assert (tuple.getDouble("a_f") == 3.0); + assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + + tuple = tuples.get(4); + assert (tuple.getLong("id") == 4); + assert (tuple.get("a_s").equals("hello")); + assert (tuple.getLong("a_i") == 4); + assert (tuple.getDouble("a_f") == 4.0); + assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); + } finally { + CollectionAdminRequest.deleteCollection("parallelDestinationCollection1").process(cluster.getSolrClient()); + solrClientCache.close(); + } } @@ -3943,7 +4248,7 @@ public void testCommitStream() throws Exception { false, true, TIMEOUT); new UpdateRequest() - .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") + .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0", "s_multi", "aaaa1", "s_multi", "bbbb1", "i_multi", "44", "i_multi", "77") .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "s_multi", "aaaa2", "s_multi", "bbbb2", "i_multi", "444", "i_multi", "777") .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "s_multi", "aaaa3", "s_multi", "bbbb3", "i_multi", "4444", "i_multi", "7777") @@ -3953,6 +4258,9 @@ public void testCommitStream() throws Exception { StreamExpression expression; TupleStream stream; Tuple t; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) @@ -3960,65 +4268,70 @@ public void testCommitStream() throws Exception { .withFunctionName("search", CloudSolrStream.class) .withFunctionName("update", UpdateStream.class) .withFunctionName("commit", CommitStream.class); 
- - //Copy all docs to destinationCollection - expression = StreamExpressionParser.parse("commit(destinationCollection, batchSize=2, update(destinationCollection, batchSize=5, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\")))"); - stream = factory.constructStream(expression); - List tuples = getTuples(stream); - - //Ensure that all CommitStream tuples indicate the correct number of copied/indexed docs - assert(tuples.size() == 1); - t = tuples.get(0); - assert(t.EOF == false); - assertEquals(5, t.get("batchIndexed")); - - //Ensure that destinationCollection actually has the new docs. - expression = StreamExpressionParser.parse("search(destinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - - Tuple tuple = tuples.get(0); - assert(tuple.getLong("id") == 0); - assert(tuple.get("a_s").equals("hello0")); - assert(tuple.getLong("a_i") == 0); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); - - tuple = tuples.get(1); - assert(tuple.getLong("id") == 1); - assert(tuple.get("a_s").equals("hello1")); - assert(tuple.getLong("a_i") == 1); - assert(tuple.getDouble("a_f") == 1.0); - assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); - tuple = tuples.get(2); - assert(tuple.getLong("id") == 2); - assert(tuple.get("a_s").equals("hello2")); - assert(tuple.getLong("a_i") == 2); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + try { + //Copy all docs to destinationCollection + expression = StreamExpressionParser.parse("commit(destinationCollection, batchSize=2, update(destinationCollection, batchSize=5, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\")))"); + stream = factory.constructStream(expression); + stream.setStreamContext(streamContext); + List tuples = getTuples(stream); - tuple = tuples.get(3); - assert(tuple.getLong("id") == 3); - assert(tuple.get("a_s").equals("hello3")); - assert(tuple.getLong("a_i") == 3); - assert(tuple.getDouble("a_f") == 3.0); - assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + //Ensure that all CommitStream tuples indicate the correct number of copied/indexed docs + assert (tuples.size() == 1); + t = tuples.get(0); + assert (t.EOF == false); + assertEquals(5, t.get("batchIndexed")); - tuple = tuples.get(4); - assert(tuple.getLong("id") == 4); - assert(tuple.get("a_s").equals("hello4")); - assert(tuple.getLong("a_i") == 4); - assert(tuple.getDouble("a_f") == 4.0); - assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); - - CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); + //Ensure that destinationCollection actually has the new docs. 
+ expression = StreamExpressionParser.parse("search(destinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + + Tuple tuple = tuples.get(0); + assert (tuple.getLong("id") == 0); + assert (tuple.get("a_s").equals("hello0")); + assert (tuple.getLong("a_i") == 0); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + + tuple = tuples.get(1); + assert (tuple.getLong("id") == 1); + assert (tuple.get("a_s").equals("hello1")); + assert (tuple.getLong("a_i") == 1); + assert (tuple.getDouble("a_f") == 1.0); + assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + + tuple = tuples.get(2); + assert (tuple.getLong("id") == 2); + assert (tuple.get("a_s").equals("hello2")); + assert (tuple.getLong("a_i") == 2); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + + tuple = tuples.get(3); + assert (tuple.getLong("id") == 3); + assert (tuple.get("a_s").equals("hello3")); + assert (tuple.getLong("a_i") == 3); + assert (tuple.getDouble("a_f") == 3.0); + assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + + tuple = tuples.get(4); + assert (tuple.getLong("id") == 4); + assert (tuple.get("a_s").equals("hello4")); + assert (tuple.getLong("a_i") == 4); + assert (tuple.getDouble("a_f") == 4.0); + assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); + } finally { + CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); + solrClientCache.close(); + } } @Test @@ -4039,6 +4352,9 @@ public void testParallelCommitStream() throws Exception { StreamExpression expression; TupleStream stream; Tuple t; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); String zkHost = cluster.getZkServer().getZkAddress(); StreamFactory factory = new StreamFactory() @@ -4049,67 +4365,73 @@ public void testParallelCommitStream() throws Exception { .withFunctionName("commit", CommitStream.class) .withFunctionName("parallel", ParallelStream.class); - //Copy all docs to destinationCollection - String updateExpression = "commit(parallelDestinationCollection, batchSize=0, zkHost=\"" + cluster.getZkServer().getZkAddress() + "\", update(parallelDestinationCollection, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\")))"; - TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\""+zkHost+"\", sort=\"batchNumber asc\")"); - List tuples = getTuples(parallelUpdateStream); - - //Ensure that all UpdateStream tuples indicate the correct number of copied/indexed docs - long count = 0; - - for(Tuple tuple : tuples) { - count+=tuple.getLong("batchIndexed"); - } - - assert(count == 5); - - //Ensure that 
destinationCollection actually has the new docs. - expression = StreamExpressionParser.parse("search(parallelDestinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); + try { + //Copy all docs to destinationCollection + String updateExpression = "commit(parallelDestinationCollection, batchSize=0, zkHost=\"" + cluster.getZkServer().getZkAddress() + "\", update(parallelDestinationCollection, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\")))"; + TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"batchNumber asc\")"); + parallelUpdateStream.setStreamContext(streamContext); + List tuples = getTuples(parallelUpdateStream); - Tuple tuple = tuples.get(0); - assert(tuple.getLong("id") == 0); - assert(tuple.get("a_s").equals("hello0")); - assert(tuple.getLong("a_i") == 0); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + //Ensure that all UpdateStream tuples indicate the correct number of copied/indexed docs + long count = 0; - tuple = tuples.get(1); - assert(tuple.getLong("id") == 1); - assert(tuple.get("a_s").equals("hello1")); - assert(tuple.getLong("a_i") == 1); - assert(tuple.getDouble("a_f") == 1.0); - assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + for (Tuple tuple : tuples) { + count += tuple.getLong("batchIndexed"); + } - tuple = tuples.get(2); - assert(tuple.getLong("id") == 2); - assert(tuple.get("a_s").equals("hello2")); - assert(tuple.getLong("a_i") == 2); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + assert (count == 5); - tuple = tuples.get(3); - assert(tuple.getLong("id") == 3); - assert(tuple.get("a_s").equals("hello3")); - assert(tuple.getLong("a_i") == 3); - assert(tuple.getDouble("a_f") == 3.0); - assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); - tuple = tuples.get(4); - assert(tuple.getLong("id") == 4); - assert(tuple.get("a_s").equals("hello4")); - assert(tuple.getLong("a_i") == 4); - assert(tuple.getDouble("a_f") == 4.0); - assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); - - CollectionAdminRequest.deleteCollection("parallelDestinationCollection").process(cluster.getSolrClient()); + //Ensure that destinationCollection actually has the new docs. 
+ expression = StreamExpressionParser.parse("search(parallelDestinationCollection, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + + Tuple tuple = tuples.get(0); + assert (tuple.getLong("id") == 0); + assert (tuple.get("a_s").equals("hello0")); + assert (tuple.getLong("a_i") == 0); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + + tuple = tuples.get(1); + assert (tuple.getLong("id") == 1); + assert (tuple.get("a_s").equals("hello1")); + assert (tuple.getLong("a_i") == 1); + assert (tuple.getDouble("a_f") == 1.0); + assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + + tuple = tuples.get(2); + assert (tuple.getLong("id") == 2); + assert (tuple.get("a_s").equals("hello2")); + assert (tuple.getLong("a_i") == 2); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + + tuple = tuples.get(3); + assert (tuple.getLong("id") == 3); + assert (tuple.get("a_s").equals("hello3")); + assert (tuple.getLong("a_i") == 3); + assert (tuple.getDouble("a_f") == 3.0); + assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + + tuple = tuples.get(4); + assert (tuple.getLong("id") == 4); + assert (tuple.get("a_s").equals("hello4")); + assert (tuple.getLong("a_i") == 4); + assert (tuple.getDouble("a_f") == 4.0); + assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); + } finally { + CollectionAdminRequest.deleteCollection("parallelDestinationCollection").process(cluster.getSolrClient()); + solrClientCache.close(); + } } @Test @@ -4120,7 +4442,7 @@ public void testParallelDaemonCommitStream() throws Exception { false, true, TIMEOUT); new UpdateRequest() - .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") + .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0", "s_multi", "aaaa", "s_multi", "bbbb", "i_multi", "4", "i_multi", "7") .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0", "s_multi", "aaaa1", "s_multi", "bbbb1", "i_multi", "44", "i_multi", "77") .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "s_multi", "aaaa2", "s_multi", "bbbb2", "i_multi", "444", "i_multi", "777") .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "s_multi", "aaaa3", "s_multi", "bbbb3", "i_multi", "4444", "i_multi", "7777") @@ -4130,6 +4452,9 @@ public void testParallelDaemonCommitStream() throws Exception { StreamExpression expression; TupleStream stream; Tuple t; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); String zkHost = cluster.getZkServer().getZkAddress(); StreamFactory factory = new StreamFactory() @@ -4141,136 +4466,144 @@ public void testParallelDaemonCommitStream() throws Exception { .withFunctionName("parallel", ParallelStream.class) .withFunctionName("daemon", DaemonStream.class); - //Copy all 
docs to destinationCollection - String updateExpression = "daemon(commit(parallelDestinationCollection1, batchSize=0, zkHost=\"" + cluster.getZkServer().getZkAddress() + "\", update(parallelDestinationCollection1, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"))), runInterval=\"1000\", id=\"test\")"; - TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\""+zkHost+"\", sort=\"batchNumber asc\")"); - List tuples = getTuples(parallelUpdateStream); - assert(tuples.size() == 2); - - //Lets sleep long enough for daemon updates to run. - //Lets stop the daemons - ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream", "action", "list")); + try { + //Copy all docs to destinationCollection + String updateExpression = "daemon(commit(parallelDestinationCollection1, batchSize=0, zkHost=\"" + cluster.getZkServer().getZkAddress() + "\", update(parallelDestinationCollection1, batchSize=2, search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"))), runInterval=\"1000\", id=\"test\")"; + TupleStream parallelUpdateStream = factory.constructStream("parallel(collection1, " + updateExpression + ", workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"batchNumber asc\")"); + parallelUpdateStream.setStreamContext(streamContext); + List tuples = getTuples(parallelUpdateStream); + assert (tuples.size() == 2); - int workersComplete = 0; - for(JettySolrRunner jetty : cluster.getJettySolrRunners()) { - int iterations = 0; - INNER: - while(iterations == 0) { - SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams); - solrStream.open(); - Tuple tupleResponse = solrStream.read(); - if (tupleResponse.EOF) { - solrStream.close(); - break INNER; - } else { - long l = tupleResponse.getLong("iterations"); - if(l > 0) { - ++workersComplete; + //Lets sleep long enough for daemon updates to run. 
+ //Lets stop the daemons + ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream", "action", "list")); + + int workersComplete = 0; + for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { + int iterations = 0; + INNER: + while (iterations == 0) { + SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams); + solrStream.setStreamContext(streamContext); + solrStream.open(); + Tuple tupleResponse = solrStream.read(); + if (tupleResponse.EOF) { + solrStream.close(); + break INNER; } else { - try { - Thread.sleep(1000); - } catch(Exception e) { + long l = tupleResponse.getLong("iterations"); + if (l > 0) { + ++workersComplete; + } else { + try { + Thread.sleep(1000); + } catch (Exception e) { + } } + iterations = (int) l; + solrStream.close(); } - iterations = (int) l; - solrStream.close(); } } - } - - assertEquals(cluster.getJettySolrRunners().size(), workersComplete); - - //Lets stop the daemons - sParams = new ModifiableSolrParams(); - sParams.set(CommonParams.QT, "/stream"); - sParams.set("action", "stop"); - sParams.set("id", "test"); - for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { - SolrStream solrStream = new SolrStream(jetty.getBaseUrl() + "/collection1", sParams); - solrStream.open(); - Tuple tupleResponse = solrStream.read(); - solrStream.close(); - } - sParams = new ModifiableSolrParams(); - sParams.set(CommonParams.QT, "/stream"); - sParams.set("action", "list"); + assertEquals(cluster.getJettySolrRunners().size(), workersComplete); - workersComplete = 0; - for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { - long stopTime = 0; - INNER: - while(stopTime == 0) { + //Lets stop the daemons + sParams = new ModifiableSolrParams(); + sParams.set(CommonParams.QT, "/stream"); + sParams.set("action", "stop"); + sParams.set("id", "test"); + for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { SolrStream solrStream = new SolrStream(jetty.getBaseUrl() + "/collection1", sParams); + solrStream.setStreamContext(streamContext); solrStream.open(); Tuple tupleResponse = solrStream.read(); - if (tupleResponse.EOF) { - solrStream.close(); - break INNER; - } else { - stopTime = tupleResponse.getLong("stopTime"); - if (stopTime > 0) { - ++workersComplete; - } else { - try { - Thread.sleep(1000); - } catch(Exception e) { + solrStream.close(); + } + sParams = new ModifiableSolrParams(); + sParams.set(CommonParams.QT, "/stream"); + sParams.set("action", "list"); + + workersComplete = 0; + for (JettySolrRunner jetty : cluster.getJettySolrRunners()) { + long stopTime = 0; + INNER: + while (stopTime == 0) { + SolrStream solrStream = new SolrStream(jetty.getBaseUrl() + "/collection1", sParams); + solrStream.setStreamContext(streamContext); + solrStream.open(); + Tuple tupleResponse = solrStream.read(); + if (tupleResponse.EOF) { + solrStream.close(); + break INNER; + } else { + stopTime = tupleResponse.getLong("stopTime"); + if (stopTime > 0) { + ++workersComplete; + } else { + try { + Thread.sleep(1000); + } catch (Exception e) { + + } } + solrStream.close(); } - solrStream.close(); } } - } - assertEquals(cluster.getJettySolrRunners().size(), workersComplete); - //Ensure that destinationCollection actually has the new docs. 
- expression = StreamExpressionParser.parse("search(parallelDestinationCollection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - - Tuple tuple = tuples.get(0); - assert(tuple.getLong("id") == 0); - assert(tuple.get("a_s").equals("hello0")); - assert(tuple.getLong("a_i") == 0); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); - - tuple = tuples.get(1); - assert(tuple.getLong("id") == 1); - assert(tuple.get("a_s").equals("hello1")); - assert(tuple.getLong("a_i") == 1); - assert(tuple.getDouble("a_f") == 1.0); - assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); - - tuple = tuples.get(2); - assert(tuple.getLong("id") == 2); - assert(tuple.get("a_s").equals("hello2")); - assert(tuple.getLong("a_i") == 2); - assert(tuple.getDouble("a_f") == 0.0); - assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); - - tuple = tuples.get(3); - assert(tuple.getLong("id") == 3); - assert(tuple.get("a_s").equals("hello3")); - assert(tuple.getLong("a_i") == 3); - assert(tuple.getDouble("a_f") == 3.0); - assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); - - tuple = tuples.get(4); - assert(tuple.getLong("id") == 4); - assert(tuple.get("a_s").equals("hello4")); - assert(tuple.getLong("a_i") == 4); - assert(tuple.getDouble("a_f") == 4.0); - assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); - assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); - - CollectionAdminRequest.deleteCollection("parallelDestinationCollection1").process(cluster.getSolrClient()); + assertEquals(cluster.getJettySolrRunners().size(), workersComplete); + //Ensure that destinationCollection actually has the new docs. 
+ expression = StreamExpressionParser.parse("search(parallelDestinationCollection1, q=*:*, fl=\"id,a_s,a_i,a_f,s_multi,i_multi\", sort=\"a_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + + Tuple tuple = tuples.get(0); + assert (tuple.getLong("id") == 0); + assert (tuple.get("a_s").equals("hello0")); + assert (tuple.getLong("a_i") == 0); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa", "bbbb"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4"), Long.parseLong("7")); + + tuple = tuples.get(1); + assert (tuple.getLong("id") == 1); + assert (tuple.get("a_s").equals("hello1")); + assert (tuple.getLong("a_i") == 1); + assert (tuple.getDouble("a_f") == 1.0); + assertList(tuple.getStrings("s_multi"), "aaaa4", "bbbb4"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44444"), Long.parseLong("77777")); + + tuple = tuples.get(2); + assert (tuple.getLong("id") == 2); + assert (tuple.get("a_s").equals("hello2")); + assert (tuple.getLong("a_i") == 2); + assert (tuple.getDouble("a_f") == 0.0); + assertList(tuple.getStrings("s_multi"), "aaaa1", "bbbb1"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("44"), Long.parseLong("77")); + + tuple = tuples.get(3); + assert (tuple.getLong("id") == 3); + assert (tuple.get("a_s").equals("hello3")); + assert (tuple.getLong("a_i") == 3); + assert (tuple.getDouble("a_f") == 3.0); + assertList(tuple.getStrings("s_multi"), "aaaa2", "bbbb2"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("444"), Long.parseLong("777")); + + tuple = tuples.get(4); + assert (tuple.getLong("id") == 4); + assert (tuple.get("a_s").equals("hello4")); + assert (tuple.getLong("a_i") == 4); + assert (tuple.getDouble("a_f") == 4.0); + assertList(tuple.getStrings("s_multi"), "aaaa3", "bbbb3"); + assertList(tuple.getLongs("i_multi"), Long.parseLong("4444"), Long.parseLong("7777")); + } finally { + CollectionAdminRequest.deleteCollection("parallelDestinationCollection1").process(cluster.getSolrClient()); + solrClientCache.close(); + } } //////////////////////////////////////////// @@ -4293,23 +4626,30 @@ public void testIntersectStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("intersect", IntersectStream.class); - - // basic - expression = StreamExpressionParser.parse("intersect(" - + "search(collection1, q=a_s:(setA || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc, a_s asc\")," - + "search(collection1, q=a_s:(setB || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc\")," - + "on=\"a_i\")"); - stream = new IntersectStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 5); - assertOrder(tuples, 0, 7, 3, 4, 8); + try { + // basic + expression = StreamExpressionParser.parse("intersect(" + + "search(collection1, q=a_s:(setA || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc, a_s asc\")," + + "search(collection1, q=a_s:(setB || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc\")," + + "on=\"a_i\")"); + stream = new IntersectStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = 
getTuples(stream); + + assert (tuples.size() == 5); + assertOrder(tuples, 0, 7, 3, 4, 8); + } finally { + solrClientCache.close(); + } } @Test @@ -4447,125 +4787,535 @@ public void testClassifyStream() throws Exception { CollectionAdminRequest.deleteCollection("checkpointCollection").process(cluster.getSolrClient()); } - @Test - public void testAnalyzeEvaluator() throws Exception { + @Test + public void testCalculatorStream() throws Exception { + String expr = "select(calc(), add(1, 1) as result)"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + SolrStream solrStream = new SolrStream(url, paramsLoc); + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + Tuple t = tuples.get(0); + assertTrue(t.getLong("result").equals(2L)); + } + + @Test + public void testAnalyzeEvaluator() throws Exception { + + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.add(id, "1", "test_t", "l b c d c"); + updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS); + + + SolrClientCache cache = new SolrClientCache(); + try { + + String expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + + SolrStream solrStream = new SolrStream(url, paramsLoc); + + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 5); + + Tuple t = tuples.get(0); + assertTrue(t.getString("test_t").equals("l")); + assertTrue(t.getString("id").equals("1")); + + t = tuples.get(1); + assertTrue(t.getString("test_t").equals("b")); + assertTrue(t.getString("id").equals("1")); + + + t = tuples.get(2); + assertTrue(t.getString("test_t").equals("c")); + assertTrue(t.getString("id").equals("1")); + + + t = tuples.get(3); + assertTrue(t.getString("test_t").equals("d")); + assertTrue(t.getString("id").equals("1")); + + t = tuples.get(4); + assertTrue(t.getString("test_t").equals("c")); + assertTrue(t.getString("id").equals("1")); + + //Try with single param + expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t) as test_t)"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 5); + + t = tuples.get(0); + assertTrue(t.getString("test_t").equals("l")); + assertTrue(t.getString("id").equals("1")); + + t = tuples.get(1); + assertTrue(t.getString("test_t").equals("b")); + assertTrue(t.getString("id").equals("1")); + + + t = tuples.get(2); + assertTrue(t.getString("test_t").equals("c")); + assertTrue(t.getString("id").equals("1")); + + + t = tuples.get(3); + assertTrue(t.getString("test_t").equals("d")); + assertTrue(t.getString("id").equals("1")); + + t = tuples.get(4); + assertTrue(t.getString("test_t").equals("c")); + 
assertTrue(t.getString("id").equals("1")); + + //Try with null in the test_t field + expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + + //Test annotating tuple + expr = "select(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test1_t)"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + List l = (List)tuples.get(0).get("test1_t"); + assertTrue(l.get(0).equals("l")); + assertTrue(l.get(1).equals("b")); + assertTrue(l.get(2).equals("c")); + assertTrue(l.get(3).equals("d")); + assertTrue(l.get(4).equals("c")); + } finally { + cache.close(); + } + } + + @Test + public void testEchoStream() throws Exception { + String expr = "echo(hello world)"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + TupleStream solrStream = new SolrStream(url, paramsLoc); + + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + String s = (String)tuples.get(0).get("echo"); + assertTrue(s.equals("hello world")); + + expr = "echo(\"hello world\")"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + s = (String)tuples.get(0).get("echo"); + assertTrue(s.equals("hello world")); + + expr = "echo(\"hello, world\")"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + s = (String)tuples.get(0).get("echo"); + assertTrue(s.equals("hello, world")); + + expr = "echo(\"hello, \\\"t\\\" world\")"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + s = (String)tuples.get(0).get("echo"); + + assertTrue(s.equals("hello, \"t\" world")); + + expr = "parallel("+COLLECTIONORALIAS+", workers=2, sort=\"echo asc\", echo(\"hello, \\\"t\\\" world\"))"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 2); + s = (String)tuples.get(0).get("echo"); + assertTrue(s.equals("hello, \"t\" world")); + s = (String)tuples.get(1).get("echo"); + 
assertTrue(s.equals("hello, \"t\" world")); + + expr = "echo(\"tuytuy iuyiuyi iuyiuyiu iuyiuyiuyiu iuyi iuyiyiuy iuyiuyiu iyiuyiu iyiuyiuyyiyiu yiuyiuyi" + + " yiuyiuyi yiuyiuuyiu yiyiuyiyiu iyiuyiuyiuiuyiu yiuyiuyi yiuyiy yiuiyiuiuy\")"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + solrStream = new SolrStream(url, paramsLoc); + + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + s = (String)tuples.get(0).get("echo"); + + assertTrue(s.equals("tuytuy iuyiuyi iuyiuyiu iuyiuyiuyiu iuyi iuyiyiuy iuyiuyiu iyiuyiu iyiuyiuyyiyiu yiuyiuyi yiuyiuyi " + + "yiuyiuuyiu yiyiuyiyiu iyiuyiuyiuiuyiu yiuyiuyi yiuyiy yiuiyiuiuy")); + + + + } + + + @Test + public void testEvalStream() throws Exception { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.add(id, "hello", "test_t", "l b c d c"); + updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS); + + String expr = "eval(select(echo(\"search("+COLLECTIONORALIAS+", q=\\\"*:*\\\", fl=id, sort=\\\"id desc\\\")\"), echo as expr_s))"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + TupleStream solrStream = new SolrStream(url, paramsLoc); + + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + String s = (String)tuples.get(0).get("id"); + assertTrue(s.equals("hello")); + + } + + private String getDateString(String year, String month, String day) { + return year+"-"+month+"-"+day+"T00:00:00Z"; + + } + + @Test + public void testTimeSeriesStream() throws Exception { + UpdateRequest updateRequest = new UpdateRequest(); + + int i=0; + while(i<50) { + updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2016", "5", "1"), "price_f", "400.00"); + } + + while(i<100) { + updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2015", "5", "1"), "price_f", "300.0"); + } + + while(i<150) { + updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2014", "5", "1"), "price_f", "500.0"); + } + + while(i<250) { + updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2013", "5", "1"), "price_f", "100.00"); + } + + updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS); + + String expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " + + "end=\"2016-12-01T01:00:00.000Z\", " + + "gap=\"+1YEAR\", " + + "field=\"test_dt\", " + + "count(*), sum(price_f), max(price_f), min(price_f))"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + TupleStream solrStream = new SolrStream(url, paramsLoc); + + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 4); + + assertTrue(tuples.get(0).get("test_dt").equals("2013-01-01T01:00:00Z")); + assertTrue(tuples.get(0).getLong("count(*)").equals(100L)); + assertTrue(tuples.get(0).getDouble("sum(price_f)").equals(10000D)); + assertTrue(tuples.get(0).getDouble("max(price_f)").equals(100D)); + assertTrue(tuples.get(0).getDouble("min(price_f)").equals(100D)); + + 
assertTrue(tuples.get(1).get("test_dt").equals("2014-01-01T01:00:00Z")); + assertTrue(tuples.get(1).getLong("count(*)").equals(50L)); + assertTrue(tuples.get(1).getDouble("sum(price_f)").equals(25000D)); + assertTrue(tuples.get(1).getDouble("max(price_f)").equals(500D)); + assertTrue(tuples.get(1).getDouble("min(price_f)").equals(500D)); + + assertTrue(tuples.get(2).get("test_dt").equals("2015-01-01T01:00:00Z")); + assertTrue(tuples.get(2).getLong("count(*)").equals(50L)); + assertTrue(tuples.get(2).getDouble("sum(price_f)").equals(15000D)); + assertTrue(tuples.get(2).getDouble("max(price_f)").equals(300D)); + assertTrue(tuples.get(2).getDouble("min(price_f)").equals(300D)); + + assertTrue(tuples.get(3).get("test_dt").equals("2016-01-01T01:00:00Z")); + assertTrue(tuples.get(3).getLong("count(*)").equals(50L)); + assertTrue(tuples.get(3).getDouble("sum(price_f)").equals(20000D)); + assertTrue(tuples.get(3).getDouble("max(price_f)").equals(400D)); + assertTrue(tuples.get(3).getDouble("min(price_f)").equals(400D)); + + } + + + + + @Test + public void testListStream() throws Exception { UpdateRequest updateRequest = new UpdateRequest(); - updateRequest.add(id, "1", "test_t", "l b c d c"); + updateRequest.add(id, "hello", "test_t", "l b c d c"); + updateRequest.add(id, "hello1", "test_t", "l b c d c"); + updateRequest.add(id, "hello2", "test_t", "l b c d c"); + updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS); + String expr1 = "search("+COLLECTIONORALIAS+", q=\"id:hello\", fl=id, sort=\"id desc\")"; + String expr2 = "search("+COLLECTIONORALIAS+", q=\"id:hello1\", fl=id, sort=\"id desc\")"; + String expr3 = "search("+COLLECTIONORALIAS+", q=\"id:hello2\", fl=id, sort=\"id desc\")"; - SolrClientCache cache = new SolrClientCache(); - try { + String cat = "list("+expr1+","+expr2+","+expr3+")"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", cat); + paramsLoc.set("qt", "/stream"); - String expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)"; - ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); - paramsLoc.set("expr", expr); - paramsLoc.set("qt", "/stream"); - String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + TupleStream solrStream = new SolrStream(url, paramsLoc); - SolrStream solrStream = new SolrStream(url, paramsLoc); + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 3); + String s = (String)tuples.get(0).get("id"); + assertTrue(s.equals("hello")); + s = (String)tuples.get(1).get("id"); + assertTrue(s.equals("hello1")); + s = (String)tuples.get(2).get("id"); + assertTrue(s.equals("hello2")); + } - StreamContext context = new StreamContext(); - solrStream.setStreamContext(context); - List tuples = getTuples(solrStream); - assertTrue(tuples.size() == 5); + @Test + public void testCellStream() throws Exception { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.add(id, "hello", "test_t", "l b c d c e"); + updateRequest.add(id, "hello1", "test_t", "l b c d c"); - Tuple t = tuples.get(0); - assertTrue(t.getString("test_t").equals("l")); - assertTrue(t.getString("id").equals("1")); + updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS); - t = tuples.get(1); - 
assertTrue(t.getString("test_t").equals("b")); - assertTrue(t.getString("id").equals("1")); + String expr = "search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id,test_t\", sort=\"id desc\")"; + String cat = "cell(results,"+expr+")"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", cat); + paramsLoc.set("qt", "/stream"); + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + TupleStream solrStream = new SolrStream(url, paramsLoc); - t = tuples.get(2); - assertTrue(t.getString("test_t").equals("c")); - assertTrue(t.getString("id").equals("1")); + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + List results = (List)tuples.get(0).get("results"); + assertTrue(results.get(0).get("id").equals("hello1")); + assertTrue(results.get(0).get("test_t").equals("l b c d c")); + assertTrue(results.get(1).get("id").equals("hello")); + assertTrue(results.get(1).get("test_t").equals("l b c d c e")); + } - t = tuples.get(3); - assertTrue(t.getString("test_t").equals("d")); - assertTrue(t.getString("id").equals("1")); + @Test + public void testLetGetStream() throws Exception { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.add(id, "hello", "test_t", "l b c d c e"); + updateRequest.add(id, "hello1", "test_t", "l b c d c"); - t = tuples.get(4); - assertTrue(t.getString("test_t").equals("c")); - assertTrue(t.getString("id").equals("1")); + updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS); + String expr = "search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id,test_t\", sort=\"id desc\")"; + String cat = "let(cell(results,"+expr+"), get(results))"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", cat); + paramsLoc.set("qt", "/stream"); - //Try with single param - expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t) as test_t)"; - paramsLoc = new ModifiableSolrParams(); - paramsLoc.set("expr", expr); - paramsLoc.set("qt", "/stream"); + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; + TupleStream solrStream = new SolrStream(url, paramsLoc); - solrStream = new SolrStream(url, paramsLoc); + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 2); + assertTrue(tuples.get(0).get("id").equals("hello1")); + assertTrue(tuples.get(0).get("test_t").equals("l b c d c")); + assertTrue(tuples.get(1).get("id").equals("hello")); + assertTrue(tuples.get(1).get("test_t").equals("l b c d c e")); - context = new StreamContext(); - solrStream.setStreamContext(context); - tuples = getTuples(solrStream); - assertTrue(tuples.size() == 5); - t = tuples.get(0); - assertTrue(t.getString("test_t").equals("l")); - assertTrue(t.getString("id").equals("1")); + //Test there are no side effects when transforming tuples. 
+ expr = "search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id,test_t\", sort=\"id desc\")"; + cat = "let(cell(results,"+expr+"), list(select(get(results), id as newid, test_t), get(results)))"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", cat); + paramsLoc.set("qt", "/stream"); - t = tuples.get(1); - assertTrue(t.getString("test_t").equals("b")); - assertTrue(t.getString("id").equals("1")); + solrStream = new SolrStream(url, paramsLoc); + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 4); + assertTrue(tuples.get(0).get("newid").equals("hello1")); + assertTrue(tuples.get(0).get("test_t").equals("l b c d c")); + assertTrue(tuples.get(1).get("newid").equals("hello")); + assertTrue(tuples.get(1).get("test_t").equals("l b c d c e")); + assertTrue(tuples.get(2).get("id").equals("hello1")); + assertTrue(tuples.get(2).get("test_t").equals("l b c d c")); + assertTrue(tuples.get(3).get("id").equals("hello")); + assertTrue(tuples.get(3).get("test_t").equals("l b c d c e")); + + //Test multiple lets + + //Test there are no side effects when transforming tuples. + expr = "search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id,test_t\", sort=\"id desc\")"; + String expr1 = "search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id,test_t\", sort=\"id asc\")"; + + cat = "let(cell(results,"+expr+"), cell(results1,"+expr1+"), list(select(get(results), id as newid, test_t), get(results1)))"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", cat); + paramsLoc.set("qt", "/stream"); - t = tuples.get(2); - assertTrue(t.getString("test_t").equals("c")); - assertTrue(t.getString("id").equals("1")); + solrStream = new SolrStream(url, paramsLoc); + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 4); + assertTrue(tuples.get(0).get("newid").equals("hello1")); + assertTrue(tuples.get(0).get("test_t").equals("l b c d c")); + assertTrue(tuples.get(1).get("newid").equals("hello")); + assertTrue(tuples.get(1).get("test_t").equals("l b c d c e")); + assertTrue(tuples.get(2).get("id").equals("hello")); + assertTrue(tuples.get(2).get("test_t").equals("l b c d c e")); + assertTrue(tuples.get(3).get("id").equals("hello1")); + assertTrue(tuples.get(3).get("test_t").equals("l b c d c")); + } - t = tuples.get(3); - assertTrue(t.getString("test_t").equals("d")); - assertTrue(t.getString("id").equals("1")); + @Test + public void testConvertEvaluator() throws Exception { - t = tuples.get(4); - assertTrue(t.getString("test_t").equals("c")); - assertTrue(t.getString("id").equals("1")); + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.add(id, "1", "miles_i", "50"); + updateRequest.add(id, "2", "miles_i", "70"); + updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //Try with null in the test_t field - expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)"; - paramsLoc = new ModifiableSolrParams(); - paramsLoc.set("expr", expr); - paramsLoc.set("qt", "/stream"); + //Test annotating tuple + String expr = "select(calc(), convert(miles, kilometers, 10) as kilometers)"; + ModifiableSolrParams paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); - solrStream = new SolrStream(url, paramsLoc); + String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS; 
+ TupleStream solrStream = new SolrStream(url, paramsLoc); - context = new StreamContext(); - solrStream.setStreamContext(context); - tuples = getTuples(solrStream); - assertTrue(tuples.size() == 1); + StreamContext context = new StreamContext(); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + double d = (double)tuples.get(0).get("kilometers"); + assertTrue(d == (double)(10*1.61)); - //Test annotating tuple - expr = "select(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test1_t)"; - paramsLoc = new ModifiableSolrParams(); - paramsLoc.set("expr", expr); - paramsLoc.set("qt", "/stream"); - solrStream = new SolrStream(url, paramsLoc); + expr = "select(search("+COLLECTIONORALIAS+", q=\"*:*\", sort=\"miles_i asc\", fl=\"miles_i\"), convert(miles, kilometers, miles_i) as kilometers)"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); - context = new StreamContext(); - solrStream.setStreamContext(context); - tuples = getTuples(solrStream); - assertTrue(tuples.size() == 1); - List l = (List)tuples.get(0).get("test1_t"); - assertTrue(l.get(0).equals("l")); - assertTrue(l.get(1).equals("b")); - assertTrue(l.get(2).equals("c")); - assertTrue(l.get(3).equals("d")); - assertTrue(l.get(4).equals("c")); - } finally { - cache.close(); - } - } + solrStream = new SolrStream(url, paramsLoc); + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 2); + d = (double)tuples.get(0).get("kilometers"); + assertTrue(d == (double)(50*1.61)); + d = (double)tuples.get(1).get("kilometers"); + assertTrue(d == (double)(70*1.61)); + expr = "parallel("+COLLECTIONORALIAS+", workers=2, sort=\"miles_i asc\", select(search("+COLLECTIONORALIAS+", q=\"*:*\", partitionKeys=miles_i, sort=\"miles_i asc\", fl=\"miles_i\"), convert(miles, kilometers, miles_i) as kilometers))"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + solrStream = new SolrStream(url, paramsLoc); + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 2); + d = (double)tuples.get(0).get("kilometers"); + assertTrue(d == (double)(50*1.61)); + d = (double)tuples.get(1).get("kilometers"); + assertTrue(d == (double)(70*1.61)); + + expr = "select(stats("+COLLECTIONORALIAS+", q=\"*:*\", sum(miles_i)), convert(miles, kilometers, sum(miles_i)) as kilometers)"; + paramsLoc = new ModifiableSolrParams(); + paramsLoc.set("expr", expr); + paramsLoc.set("qt", "/stream"); + solrStream = new SolrStream(url, paramsLoc); + context = new StreamContext(); + solrStream.setStreamContext(context); + tuples = getTuples(solrStream); + assertTrue(tuples.size() == 1); + d = (double)tuples.get(0).get("kilometers"); + assertTrue(d == (double)(120*1.61)); + } @Test public void testExecutorStream() throws Exception { @@ -4735,6 +5485,9 @@ public void testBasicTextLogitStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) @@ -4743,67 +5496,72 @@ public void 
testBasicTextLogitStream() throws Exception { .withFunctionName("train", TextLogitStream.class) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("update", UpdateStream.class); + try { + expression = StreamExpressionParser.parse("features(collection1, q=\"*:*\", featureSet=\"first\", field=\"tv_text\", outcome=\"out_i\", numTerms=4)"); + stream = new FeaturesSelectionStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - expression = StreamExpressionParser.parse("features(collection1, q=\"*:*\", featureSet=\"first\", field=\"tv_text\", outcome=\"out_i\", numTerms=4)"); - stream = new FeaturesSelectionStream(expression, factory); - tuples = getTuples(stream); + assert (tuples.size() == 4); + HashSet terms = new HashSet<>(); + for (Tuple tuple : tuples) { + terms.add((String) tuple.get("term_s")); + } + assertTrue(terms.contains("d")); + assertTrue(terms.contains("c")); + assertTrue(terms.contains("e")); + assertTrue(terms.contains("f")); - assert(tuples.size() == 4); - HashSet terms = new HashSet<>(); - for (Tuple tuple : tuples) { - terms.add((String) tuple.get("term_s")); + String textLogitExpression = "train(" + + "collection1, " + + "features(collection1, q=\"*:*\", featureSet=\"first\", field=\"tv_text\", outcome=\"out_i\", numTerms=4)," + + "q=\"*:*\", " + + "name=\"model\", " + + "field=\"tv_text\", " + + "outcome=\"out_i\", " + + "maxIterations=100)"; + stream = factory.constructStream(textLogitExpression); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + Tuple lastTuple = tuples.get(tuples.size() - 1); + List lastWeights = lastTuple.getDoubles("weights_ds"); + Double[] lastWeightsArray = lastWeights.toArray(new Double[lastWeights.size()]); + + // first feature is bias value + Double[] testRecord = {1.0, 1.17, 0.691, 0.0, 0.0}; + double d = sum(multiply(testRecord, lastWeightsArray)); + double prob = sigmoid(d); + assertEquals(prob, 1.0, 0.1); + + // first feature is bias value + Double[] testRecord2 = {1.0, 0.0, 0.0, 1.17, 0.691}; + d = sum(multiply(testRecord2, lastWeightsArray)); + prob = sigmoid(d); + assertEquals(prob, 0, 0.1); + + stream = factory.constructStream("update(destinationCollection, batchSize=5, " + textLogitExpression + ")"); + getTuples(stream); + cluster.getSolrClient().commit("destinationCollection"); + + stream = factory.constructStream("search(destinationCollection, " + + "q=*:*, " + + "fl=\"iteration_i,* \", " + + "rows=100, " + + "sort=\"iteration_i desc\")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(100, tuples.size()); + Tuple lastModel = tuples.get(0); + ClassificationEvaluation evaluation = ClassificationEvaluation.create(lastModel.fields); + assertTrue(evaluation.getF1() >= 1.0); + assertEquals(Math.log(5000.0 / (2500 + 1)), lastModel.getDoubles("idfs_ds").get(0), 0.0001); + // make sure the tuples is retrieved in correct order + Tuple firstTuple = tuples.get(99); + assertEquals(1L, (long) firstTuple.getLong("iteration_i")); + } finally { + CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); + solrClientCache.close(); } - assertTrue(terms.contains("d")); - assertTrue(terms.contains("c")); - assertTrue(terms.contains("e")); - assertTrue(terms.contains("f")); - - String textLogitExpression = "train(" + - "collection1, " + - "features(collection1, q=\"*:*\", featureSet=\"first\", field=\"tv_text\", outcome=\"out_i\", numTerms=4),"+ - "q=\"*:*\", " + - "name=\"model\", " + - 
"field=\"tv_text\", " + - "outcome=\"out_i\", " + - "maxIterations=100)"; - stream = factory.constructStream(textLogitExpression); - tuples = getTuples(stream); - Tuple lastTuple = tuples.get(tuples.size() - 1); - List lastWeights = lastTuple.getDoubles("weights_ds"); - Double[] lastWeightsArray = lastWeights.toArray(new Double[lastWeights.size()]); - - // first feature is bias value - Double[] testRecord = {1.0, 1.17, 0.691, 0.0, 0.0}; - double d = sum(multiply(testRecord, lastWeightsArray)); - double prob = sigmoid(d); - assertEquals(prob, 1.0, 0.1); - - // first feature is bias value - Double[] testRecord2 = {1.0, 0.0, 0.0, 1.17, 0.691}; - d = sum(multiply(testRecord2, lastWeightsArray)); - prob = sigmoid(d); - assertEquals(prob, 0, 0.1); - - stream = factory.constructStream("update(destinationCollection, batchSize=5, "+textLogitExpression+")"); - getTuples(stream); - cluster.getSolrClient().commit("destinationCollection"); - - stream = factory.constructStream("search(destinationCollection, " + - "q=*:*, " + - "fl=\"iteration_i,* \", " + - "rows=100, " + - "sort=\"iteration_i desc\")"); - tuples = getTuples(stream); - assertEquals(100, tuples.size()); - Tuple lastModel = tuples.get(0); - ClassificationEvaluation evaluation = ClassificationEvaluation.create(lastModel.fields); - assertTrue(evaluation.getF1() >= 1.0); - assertEquals(Math.log( 5000.0 / (2500 + 1)), lastModel.getDoubles("idfs_ds").get(0), 0.0001); - // make sure the tuples is retrieved in correct order - Tuple firstTuple = tuples.get(99); - assertEquals(1L, (long) firstTuple.getLong("iteration_i")); - - CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); } private double sigmoid(double in) { @@ -4855,20 +5613,30 @@ public void testParallelIntersectStream() throws Exception { .withFunctionName("intersect", IntersectStream.class) .withFunctionName("parallel", ParallelStream.class); // basic - - String zkHost = cluster.getZkServer().getZkAddress(); - final TupleStream stream = streamFactory.constructStream("parallel(" - + "collection1, " - + "intersect(" + + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + String zkHost = cluster.getZkServer().getZkAddress(); + final TupleStream stream = streamFactory.constructStream("parallel(" + + "collection1, " + + "intersect(" + "search(collection1, q=a_s:(setA || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc, a_s asc\", partitionKeys=\"a_i\")," + "search(collection1, q=a_s:(setB || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\")," + "on=\"a_i\")," - + "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i asc\")"); - final List tuples = getTuples(stream); - - assert(tuples.size() == 5); - assertOrder(tuples, 0,7,3,4,8); + + "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i asc\")"); + + stream.setStreamContext(streamContext); + + final List tuples = getTuples(stream); + assert (tuples.size() == 5); + assertOrder(tuples, 0, 7, 3, 4, 8); + } finally { + solrClientCache.close(); + } } @Test @@ -4889,6 +5657,9 @@ public void testFeaturesSelectionStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", 
cluster.getZkServer().getZkAddress()) @@ -4897,35 +5668,42 @@ public void testFeaturesSelectionStream() throws Exception { .withFunctionName("search", CloudSolrStream.class) .withFunctionName("update", UpdateStream.class); - String featuresExpression = "featuresSelection(collection1, q=\"*:*\", featureSet=\"first\", field=\"whitetok\", outcome=\"out_i\", numTerms=4)"; - // basic - expression = StreamExpressionParser.parse(featuresExpression); - stream = new FeaturesSelectionStream(expression, factory); - tuples = getTuples(stream); - assert(tuples.size() == 4); + try { + String featuresExpression = "featuresSelection(collection1, q=\"*:*\", featureSet=\"first\", field=\"whitetok\", outcome=\"out_i\", numTerms=4)"; + // basic + expression = StreamExpressionParser.parse(featuresExpression); + stream = new FeaturesSelectionStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); - assertTrue(tuples.get(0).get("term_s").equals("c")); - assertTrue(tuples.get(1).get("term_s").equals("d")); - assertTrue(tuples.get(2).get("term_s").equals("e")); - assertTrue(tuples.get(3).get("term_s").equals("f")); + assert (tuples.size() == 4); - // update - expression = StreamExpressionParser.parse("update(destinationCollection, batchSize=5, "+featuresExpression+")"); - stream = new UpdateStream(expression, factory); - getTuples(stream); - cluster.getSolrClient().commit("destinationCollection"); + assertTrue(tuples.get(0).get("term_s").equals("c")); + assertTrue(tuples.get(1).get("term_s").equals("d")); + assertTrue(tuples.get(2).get("term_s").equals("e")); + assertTrue(tuples.get(3).get("term_s").equals("f")); - expression = StreamExpressionParser.parse("search(destinationCollection, q=featureSet_s:first, fl=\"index_i, term_s\", sort=\"index_i asc\")"); - stream = new CloudSolrStream(expression, factory); - tuples = getTuples(stream); - assertEquals(4, tuples.size()); - assertTrue(tuples.get(0).get("term_s").equals("c")); - assertTrue(tuples.get(1).get("term_s").equals("d")); - assertTrue(tuples.get(2).get("term_s").equals("e")); - assertTrue(tuples.get(3).get("term_s").equals("f")); + // update + expression = StreamExpressionParser.parse("update(destinationCollection, batchSize=5, " + featuresExpression + ")"); + stream = new UpdateStream(expression, factory); + stream.setStreamContext(streamContext); + getTuples(stream); + cluster.getSolrClient().commit("destinationCollection"); - CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); + expression = StreamExpressionParser.parse("search(destinationCollection, q=featureSet_s:first, fl=\"index_i, term_s\", sort=\"index_i asc\")"); + stream = new CloudSolrStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(4, tuples.size()); + assertTrue(tuples.get(0).get("term_s").equals("c")); + assertTrue(tuples.get(1).get("term_s").equals("d")); + assertTrue(tuples.get(2).get("term_s").equals("e")); + assertTrue(tuples.get(3).get("term_s").equals("f")); + } finally { + CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient()); + solrClientCache.close(); + } } @@ -5160,23 +5938,30 @@ public void testComplementStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + StreamFactory factory = 
new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) .withFunctionName("search", CloudSolrStream.class) .withFunctionName("complement", ComplementStream.class); - - // basic - expression = StreamExpressionParser.parse("complement(" - + "search(collection1, q=a_s:(setA || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc, a_s asc\")," - + "search(collection1, q=a_s:(setB || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc\")," - + "on=\"a_i\")"); - stream = new ComplementStream(expression, factory); - tuples = getTuples(stream); - - assert(tuples.size() == 1); - assertOrder(tuples, 2); + try { + // basic + expression = StreamExpressionParser.parse("complement(" + + "search(collection1, q=a_s:(setA || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc, a_s asc\")," + + "search(collection1, q=a_s:(setB || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc\")," + + "on=\"a_i\")"); + stream = new ComplementStream(expression, factory); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assert (tuples.size() == 1); + assertOrder(tuples, 2); + } finally { + solrClientCache.close(); + } } @Test @@ -5190,6 +5975,9 @@ public void testCartesianProductStream() throws Exception { StreamExpression expression; TupleStream stream; List tuples; + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); StreamFactory factory = new StreamFactory() .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress()) @@ -5197,117 +5985,125 @@ public void testCartesianProductStream() throws Exception { .withFunctionName("cartesian", CartesianProductStream.class); // single selection, no sort - stream = factory.constructStream("cartesian(" - + "search(collection1, q=*:*, fl=\"id,a_ss\", sort=\"id asc\")," - + "a_ss" - + ")"); - tuples = getTuples(stream); - - assertEquals(10, tuples.size()); - assertOrder(tuples, 0,0,0,0,0,1,1,1,1,1); - assertEquals("a", tuples.get(0).get("a_ss")); - assertEquals("c", tuples.get(2).get("a_ss")); - assertEquals("a", tuples.get(5).get("a_ss")); - assertEquals("c", tuples.get(7).get("a_ss")); - - // single selection, sort - stream = factory.constructStream("cartesian(" - + "search(collection1, q=*:*, fl=\"id,a_ss\", sort=\"id asc\")," - + "a_ss," - + "productSort=\"a_ss DESC\"" - + ")"); - tuples = getTuples(stream); - - assertEquals(10, tuples.size()); - assertOrder(tuples, 0,0,0,0,0,1,1,1,1,1); - assertEquals("e", tuples.get(0).get("a_ss")); - assertEquals("c", tuples.get(2).get("a_ss")); - assertEquals("e", tuples.get(5).get("a_ss")); - assertEquals("c", tuples.get(7).get("a_ss")); - - // multi selection, sort - stream = factory.constructStream("cartesian(" - + "search(collection1, q=*:*, fl=\"id,a_ss,b_ls\", sort=\"id asc\")," - + "a_ss," - + "b_ls," - + "productSort=\"a_ss ASC\"" - + ")"); - tuples = getTuples(stream); - - assertEquals(20, tuples.size()); // (5 * 3) + 5 - assertOrder(tuples, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1); - assertEquals("a", tuples.get(0).get("a_ss")); - assertEquals(1L, tuples.get(0).get("b_ls")); - assertEquals("a", tuples.get(1).get("a_ss")); - assertEquals(2L, tuples.get(1).get("b_ls")); - assertEquals("a", tuples.get(2).get("a_ss")); - assertEquals(3L, tuples.get(2).get("b_ls")); - - assertEquals("b", tuples.get(3).get("a_ss")); - assertEquals(1L, tuples.get(3).get("b_ls")); - assertEquals("b", tuples.get(4).get("a_ss")); - assertEquals(2L, tuples.get(4).get("b_ls")); - assertEquals("b", 
tuples.get(5).get("a_ss")); - assertEquals(3L, tuples.get(5).get("b_ls")); - - // multi selection, sort - stream = factory.constructStream("cartesian(" - + "search(collection1, q=*:*, fl=\"id,a_ss,b_ls\", sort=\"id asc\")," - + "a_ss," - + "b_ls," - + "productSort=\"a_ss ASC, b_ls DESC\"" - + ")"); - tuples = getTuples(stream); - - assertEquals(20, tuples.size()); // (5 * 3) + 5 - assertOrder(tuples, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1); - assertEquals("a", tuples.get(0).get("a_ss")); - assertEquals(3L, tuples.get(0).get("b_ls")); - assertEquals("a", tuples.get(1).get("a_ss")); - assertEquals(2L, tuples.get(1).get("b_ls")); - assertEquals("a", tuples.get(2).get("a_ss")); - assertEquals(1L, tuples.get(2).get("b_ls")); - - assertEquals("b", tuples.get(3).get("a_ss")); - assertEquals(3L, tuples.get(3).get("b_ls")); - assertEquals("b", tuples.get(4).get("a_ss")); - assertEquals(2L, tuples.get(4).get("b_ls")); - assertEquals("b", tuples.get(5).get("a_ss")); - assertEquals(1L, tuples.get(5).get("b_ls")); - - // multi selection, sort - stream = factory.constructStream("cartesian(" - + "search(collection1, q=*:*, fl=\"id,a_ss,b_ls\", sort=\"id asc\")," - + "a_ss," - + "b_ls," - + "productSort=\"b_ls DESC\"" - + ")"); - tuples = getTuples(stream); - - assertEquals(20, tuples.size()); // (5 * 3) + 5 - assertOrder(tuples, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1); - assertEquals("a", tuples.get(0).get("a_ss")); - assertEquals(3L, tuples.get(0).get("b_ls")); - assertEquals("b", tuples.get(1).get("a_ss")); - assertEquals(3L, tuples.get(1).get("b_ls")); - assertEquals("c", tuples.get(2).get("a_ss")); - assertEquals(3L, tuples.get(2).get("b_ls")); - assertEquals("d", tuples.get(3).get("a_ss")); - assertEquals(3L, tuples.get(3).get("b_ls")); - assertEquals("e", tuples.get(4).get("a_ss")); - assertEquals(3L, tuples.get(4).get("b_ls")); - - assertEquals("a", tuples.get(5).get("a_ss")); - assertEquals(2L, tuples.get(5).get("b_ls")); - assertEquals("b", tuples.get(6).get("a_ss")); - assertEquals(2L, tuples.get(6).get("b_ls")); - assertEquals("c", tuples.get(7).get("a_ss")); - assertEquals(2L, tuples.get(7).get("b_ls")); - assertEquals("d", tuples.get(8).get("a_ss")); - assertEquals(2L, tuples.get(8).get("b_ls")); - assertEquals("e", tuples.get(9).get("a_ss")); - assertEquals(2L, tuples.get(9).get("b_ls")); + try { + stream = factory.constructStream("cartesian(" + + "search(collection1, q=*:*, fl=\"id,a_ss\", sort=\"id asc\")," + + "a_ss" + + ")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(10, tuples.size()); + assertOrder(tuples, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1); + assertEquals("a", tuples.get(0).get("a_ss")); + assertEquals("c", tuples.get(2).get("a_ss")); + assertEquals("a", tuples.get(5).get("a_ss")); + assertEquals("c", tuples.get(7).get("a_ss")); + + // single selection, sort + stream = factory.constructStream("cartesian(" + + "search(collection1, q=*:*, fl=\"id,a_ss\", sort=\"id asc\")," + + "a_ss," + + "productSort=\"a_ss DESC\"" + + ")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(10, tuples.size()); + assertOrder(tuples, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1); + assertEquals("e", tuples.get(0).get("a_ss")); + assertEquals("c", tuples.get(2).get("a_ss")); + assertEquals("e", tuples.get(5).get("a_ss")); + assertEquals("c", tuples.get(7).get("a_ss")); + + // multi selection, sort + stream = factory.constructStream("cartesian(" + + "search(collection1, q=*:*, fl=\"id,a_ss,b_ls\", sort=\"id asc\")," + + "a_ss," + + "b_ls," 
+ + "productSort=\"a_ss ASC\"" + + ")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(20, tuples.size()); // (5 * 3) + 5 + assertOrder(tuples, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1); + assertEquals("a", tuples.get(0).get("a_ss")); + assertEquals(1L, tuples.get(0).get("b_ls")); + assertEquals("a", tuples.get(1).get("a_ss")); + assertEquals(2L, tuples.get(1).get("b_ls")); + assertEquals("a", tuples.get(2).get("a_ss")); + assertEquals(3L, tuples.get(2).get("b_ls")); + + assertEquals("b", tuples.get(3).get("a_ss")); + assertEquals(1L, tuples.get(3).get("b_ls")); + assertEquals("b", tuples.get(4).get("a_ss")); + assertEquals(2L, tuples.get(4).get("b_ls")); + assertEquals("b", tuples.get(5).get("a_ss")); + assertEquals(3L, tuples.get(5).get("b_ls")); + + // multi selection, sort + stream = factory.constructStream("cartesian(" + + "search(collection1, q=*:*, fl=\"id,a_ss,b_ls\", sort=\"id asc\")," + + "a_ss," + + "b_ls," + + "productSort=\"a_ss ASC, b_ls DESC\"" + + ")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(20, tuples.size()); // (5 * 3) + 5 + assertOrder(tuples, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1); + assertEquals("a", tuples.get(0).get("a_ss")); + assertEquals(3L, tuples.get(0).get("b_ls")); + assertEquals("a", tuples.get(1).get("a_ss")); + assertEquals(2L, tuples.get(1).get("b_ls")); + assertEquals("a", tuples.get(2).get("a_ss")); + assertEquals(1L, tuples.get(2).get("b_ls")); + + assertEquals("b", tuples.get(3).get("a_ss")); + assertEquals(3L, tuples.get(3).get("b_ls")); + assertEquals("b", tuples.get(4).get("a_ss")); + assertEquals(2L, tuples.get(4).get("b_ls")); + assertEquals("b", tuples.get(5).get("a_ss")); + assertEquals(1L, tuples.get(5).get("b_ls")); + + // multi selection, sort + stream = factory.constructStream("cartesian(" + + "search(collection1, q=*:*, fl=\"id,a_ss,b_ls\", sort=\"id asc\")," + + "a_ss," + + "b_ls," + + "productSort=\"b_ls DESC\"" + + ")"); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + + assertEquals(20, tuples.size()); // (5 * 3) + 5 + assertOrder(tuples, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1); + assertEquals("a", tuples.get(0).get("a_ss")); + assertEquals(3L, tuples.get(0).get("b_ls")); + assertEquals("b", tuples.get(1).get("a_ss")); + assertEquals(3L, tuples.get(1).get("b_ls")); + assertEquals("c", tuples.get(2).get("a_ss")); + assertEquals(3L, tuples.get(2).get("b_ls")); + assertEquals("d", tuples.get(3).get("a_ss")); + assertEquals(3L, tuples.get(3).get("b_ls")); + assertEquals("e", tuples.get(4).get("a_ss")); + assertEquals(3L, tuples.get(4).get("b_ls")); + + assertEquals("a", tuples.get(5).get("a_ss")); + assertEquals(2L, tuples.get(5).get("b_ls")); + assertEquals("b", tuples.get(6).get("a_ss")); + assertEquals(2L, tuples.get(6).get("b_ls")); + assertEquals("c", tuples.get(7).get("a_ss")); + assertEquals(2L, tuples.get(7).get("b_ls")); + assertEquals("d", tuples.get(8).get("a_ss")); + assertEquals(2L, tuples.get(8).get("b_ls")); + assertEquals("e", tuples.get(9).get("a_ss")); + assertEquals(2L, tuples.get(9).get("b_ls")); + } finally { + solrClientCache.close(); + } } @@ -5333,20 +6129,28 @@ public void testParallelComplementStream() throws Exception { .withFunctionName("search", CloudSolrStream.class) .withFunctionName("complement", ComplementStream.class) .withFunctionName("parallel", ParallelStream.class); - - final String zkHost = cluster.getZkServer().getZkAddress(); - 
final TupleStream stream = streamFactory.constructStream("parallel(" - + "collection1, " - + "complement(" - + "search(collection1, q=a_s:(setA || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc, a_s asc\", partitionKeys=\"a_i\")," - + "search(collection1, q=a_s:(setB || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\")," - + "on=\"a_i\")," - + "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i asc\")"); - final List tuples = getTuples(stream); - - assert(tuples.size() == 1); - assertOrder(tuples, 2); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + final String zkHost = cluster.getZkServer().getZkAddress(); + final TupleStream stream = streamFactory.constructStream("parallel(" + + "collection1, " + + "complement(" + + "search(collection1, q=a_s:(setA || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc, a_s asc\", partitionKeys=\"a_i\")," + + "search(collection1, q=a_s:(setB || setAB), fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\")," + + "on=\"a_i\")," + + "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i asc\")"); + + stream.setStreamContext(streamContext); + final List tuples = getTuples(stream); + assert (tuples.size() == 1); + assertOrder(tuples, 2); + } finally { + solrClientCache.close(); + } } protected List getTuples(TupleStream tupleStream) throws IOException { diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java index 2f2273e8b146..0de3aa052c87 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java @@ -131,13 +131,20 @@ public void testUniqueStream() throws Exception { .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParams = StreamingTest.mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - UniqueStream ustream = new UniqueStream(stream, new FieldEqualitor("a_f")); - List tuples = getTuples(ustream); - assertEquals(4, tuples.size()); - assertOrder(tuples, 0,1,3,4); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + SolrParams sParams = StreamingTest.mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + UniqueStream ustream = new UniqueStream(stream, new FieldEqualitor("a_f")); + ustream.setStreamContext(streamContext); + List tuples = getTuples(ustream); + assertEquals(4, tuples.size()); + assertOrder(tuples, 0, 1, 3, 4); + } finally { + solrClientCache.close(); + } } @Test @@ -167,15 +174,22 @@ public void testNonePartitionKeys() throws Exception { .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9") .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { - SolrParams sParamsA = StreamingTest.mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s asc,a_f asc", 
"partitionKeys", "none"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - ParallelStream pstream = parallelStream(stream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); - attachStreamFactory(pstream); - List tuples = getTuples(pstream); - - assert(tuples.size() == (10 * numWorkers)); // Each tuple will be double counted. + SolrParams sParamsA = StreamingTest.mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s asc,a_f asc", "partitionKeys", "none"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + ParallelStream pstream = parallelStream(stream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); + assert (tuples.size() == (10 * numWorkers)); // Each tuple will be double counted. + } finally { + solrClientCache.close(); + } } @Test @@ -193,19 +207,29 @@ public void testParallelUniqueStream() throws Exception { .add(id, "8", "a_s", "hello1", "a_i", "13", "a_f", "4") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc", "partitionKeys", "a_f"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - UniqueStream ustream = new UniqueStream(stream, new FieldEqualitor("a_f")); - ParallelStream pstream = parallelStream(ustream, new FieldComparator("a_f", ComparatorOrder.ASCENDING)); - attachStreamFactory(pstream); - List tuples = getTuples(pstream); - assertEquals(5, tuples.size()); - assertOrder(tuples, 0, 1, 3, 4, 6); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { - //Test the eofTuples + SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc", "partitionKeys", "a_f"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + UniqueStream ustream = new UniqueStream(stream, new FieldEqualitor("a_f")); + ParallelStream pstream = parallelStream(ustream, new FieldComparator("a_f", ComparatorOrder.ASCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); + assertEquals(5, tuples.size()); + assertOrder(tuples, 0, 1, 3, 4, 6); - Map eofTuples = pstream.getEofTuples(); - assertEquals(numWorkers, eofTuples.size()); //There should be an EOF tuple for each worker. + //Test the eofTuples + + Map eofTuples = pstream.getEofTuples(); + assertEquals(numWorkers, eofTuples.size()); //There should be an EOF tuple for each worker. 
+ }finally { + solrClientCache.close(); + } } @@ -226,12 +250,21 @@ public void testMultipleFqClauses() throws Exception { streamFactory.withCollectionZkHost(COLLECTIONORALIAS, zkHost); - ModifiableSolrParams params = new ModifiableSolrParams(mapParams("q", "*:*", "fl", "id,a_i", - "sort", "a_i asc", "fq", "a_ss:hello0", "fq", "a_ss:hello1")); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, params); - List tuples = getTuples(stream); - assertEquals("Multiple fq clauses should have been honored", 1, tuples.size()); - assertEquals("should only have gotten back document 0", "0", tuples.get(0).getString("id")); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + ModifiableSolrParams params = new ModifiableSolrParams(mapParams("q", "*:*", "fl", "id,a_i", + "sort", "a_i asc", "fq", "a_ss:hello0", "fq", "a_ss:hello1")); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, params); + stream.setStreamContext(streamContext); + List tuples = getTuples(stream); + assertEquals("Multiple fq clauses should have been honored", 1, tuples.size()); + assertEquals("should only have gotten back document 0", "0", tuples.get(0).getString("id")); + } finally { + solrClientCache.close(); + } } @Test @@ -245,15 +278,20 @@ public void testRankStream() throws Exception { .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - - SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - RankStream rstream = new RankStream(stream, 3, new FieldComparator("a_i",ComparatorOrder.DESCENDING)); - List tuples = getTuples(rstream); - - assertEquals(3, tuples.size()); - assertOrder(tuples, 4,3,2); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + RankStream rstream = new RankStream(stream, 3, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); + rstream.setStreamContext(streamContext); + List tuples = getTuples(rstream); + assertEquals(3, tuples.size()); + assertOrder(tuples, 4, 3, 2); + } finally { + solrClientCache.close(); + } } @Test @@ -272,22 +310,30 @@ public void testParallelRankStream() throws Exception { .add(id, "10", "a_s", "hello1", "a_i", "10", "a_f", "1") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - RankStream rstream = new RankStream(stream, 11, new FieldComparator("a_i",ComparatorOrder.DESCENDING)); - ParallelStream pstream = parallelStream(rstream, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); - attachStreamFactory(pstream); - List tuples = getTuples(pstream); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); + CloudSolrStream stream = new 
CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + RankStream rstream = new RankStream(stream, 11, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); + ParallelStream pstream = parallelStream(rstream, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); - assertEquals(10, tuples.size()); - assertOrder(tuples, 10,9,8,7,6,5,4,3,2,0); + assertEquals(10, tuples.size()); + assertOrder(tuples, 10, 9, 8, 7, 6, 5, 4, 3, 2, 0); + } finally { + solrClientCache.close(); + } } @Test -public void testTrace() throws Exception { + public void testTrace() throws Exception { - new UpdateRequest() + new UpdateRequest() .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1") .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2") .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3") @@ -300,15 +346,24 @@ public void testTrace() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //Test with spaces in the parameter lists. - SolrParams sParamsA = mapParams("q", "*:*", "fl", "id,a_s, a_i,a_f", "sort", "a_s asc,a_f asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - stream.setTrace(true); - List tuples = getTuples(stream); - assertEquals(COLLECTIONORALIAS, tuples.get(0).get("_COLLECTION_")); - assertEquals(COLLECTIONORALIAS, tuples.get(1).get("_COLLECTION_")); - assertEquals(COLLECTIONORALIAS, tuples.get(2).get("_COLLECTION_")); - assertEquals(COLLECTIONORALIAS, tuples.get(3).get("_COLLECTION_")); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + //Test with spaces in the parameter lists. + SolrParams sParamsA = mapParams("q", "*:*", "fl", "id,a_s, a_i,a_f", "sort", "a_s asc,a_f asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + stream.setTrace(true); + stream.setStreamContext(streamContext); + List tuples = getTuples(stream); + assertEquals(COLLECTIONORALIAS, tuples.get(0).get("_COLLECTION_")); + assertEquals(COLLECTIONORALIAS, tuples.get(1).get("_COLLECTION_")); + assertEquals(COLLECTIONORALIAS, tuples.get(2).get("_COLLECTION_")); + assertEquals(COLLECTIONORALIAS, tuples.get(3).get("_COLLECTION_")); + } finally { + solrClientCache.close(); + } } @Test @@ -327,52 +382,60 @@ public void testReducerStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //Test with spaces in the parameter lists. 
- SolrParams sParamsA = mapParams("q", "*:*", "fl", "id,a_s, a_i, a_f", "sort", "a_s asc , a_f asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - ReducerStream rstream = new ReducerStream(stream, - new FieldEqualitor("a_s"), - new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 5)); - - List tuples = getTuples(rstream); - - assertEquals(3, tuples.size()); - - Tuple t0 = tuples.get(0); - List maps0 = t0.getMaps("group"); - assertMaps(maps0, 0, 2, 1, 9); - - Tuple t1 = tuples.get(1); - List maps1 = t1.getMaps("group"); - assertMaps(maps1, 3, 5, 7, 8); - - Tuple t2 = tuples.get(2); - List maps2 = t2.getMaps("group"); - assertMaps(maps2, 4, 6); - - //Test with spaces in the parameter lists using a comparator - sParamsA = mapParams("q", "*:*", "fl", "id,a_s, a_i, a_f", "sort", "a_s asc , a_f asc"); - stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - rstream = new ReducerStream(stream, - new FieldComparator("a_s", ComparatorOrder.ASCENDING), - new GroupOperation(new FieldComparator("a_f", ComparatorOrder.DESCENDING), 5)); - - tuples = getTuples(rstream); - - assertEquals(3, tuples.size()); - - t0 = tuples.get(0); - maps0 = t0.getMaps("group"); - assertMaps(maps0, 9, 1, 2, 0); - - t1 = tuples.get(1); - maps1 = t1.getMaps("group"); - assertMaps(maps1, 8, 7, 5, 3); - - t2 = tuples.get(2); - maps2 = t2.getMaps("group"); - assertMaps(maps2, 6, 4); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + //Test with spaces in the parameter lists. + SolrParams sParamsA = mapParams("q", "*:*", "fl", "id,a_s, a_i, a_f", "sort", "a_s asc , a_f asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + ReducerStream rstream = new ReducerStream(stream, + new FieldEqualitor("a_s"), + new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 5)); + + rstream.setStreamContext(streamContext); + List tuples = getTuples(rstream); + + assertEquals(3, tuples.size()); + + Tuple t0 = tuples.get(0); + List maps0 = t0.getMaps("group"); + assertMaps(maps0, 0, 2, 1, 9); + + Tuple t1 = tuples.get(1); + List maps1 = t1.getMaps("group"); + assertMaps(maps1, 3, 5, 7, 8); + + Tuple t2 = tuples.get(2); + List maps2 = t2.getMaps("group"); + assertMaps(maps2, 4, 6); + + //Test with spaces in the parameter lists using a comparator + sParamsA = mapParams("q", "*:*", "fl", "id,a_s, a_i, a_f", "sort", "a_s asc , a_f asc"); + stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + rstream = new ReducerStream(stream, + new FieldComparator("a_s", ComparatorOrder.ASCENDING), + new GroupOperation(new FieldComparator("a_f", ComparatorOrder.DESCENDING), 5)); + rstream.setStreamContext(streamContext); + tuples = getTuples(rstream); + + assertEquals(3, tuples.size()); + + t0 = tuples.get(0); + maps0 = t0.getMaps("group"); + assertMaps(maps0, 9, 1, 2, 0); + + t1 = tuples.get(1); + maps1 = t1.getMaps("group"); + assertMaps(maps1, 8, 7, 5, 3); + + t2 = tuples.get(2); + maps2 = t2.getMaps("group"); + assertMaps(maps2, 6, 4); + } finally { + solrClientCache.close(); + } } @Test @@ -392,17 +455,24 @@ public void testZeroReducerStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //Test with spaces in the parameter lists. 
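The reducer hunks in this file all follow the same shape: a ReducerStream groups the sorted CloudSolrStream output on a_s, and a GroupOperation keeps the top N tuples per group, which the assertions then read back with getMaps("group"). Below is a small sketch of that wiring under the new StreamContext pattern; it is not part of the patch, the package locations for FieldEqualitor, GroupOperation and FieldComparator are assumed to be the solrj io.eq, io.ops and io.comp packages, and the zkHost/collection values are placeholders.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
import org.apache.solr.client.solrj.io.comp.FieldComparator;
import org.apache.solr.client.solrj.io.eq.FieldEqualitor;
import org.apache.solr.client.solrj.io.ops.GroupOperation;
import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
import org.apache.solr.client.solrj.io.stream.ReducerStream;
import org.apache.solr.client.solrj.io.stream.StreamContext;
import org.apache.solr.common.params.ModifiableSolrParams;

public class ReducerSketch {

  // Groups tuples on a_s and keeps up to five tuples per group,
  // ordered by a_f ascending, mirroring the test's ReducerStream setup.
  public static List<Tuple> groupByAString(String zkHost, String collection) throws IOException {
    SolrClientCache clientCache = new SolrClientCache();
    StreamContext context = new StreamContext();
    context.setSolrClientCache(clientCache);
    try {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("q", "*:*");
      params.set("fl", "id,a_s,a_f");
      params.set("sort", "a_s asc,a_f asc");   // stream must be sorted on the group key first

      CloudSolrStream search = new CloudSolrStream(zkHost, collection, params);
      ReducerStream reducer = new ReducerStream(search,
          new FieldEqualitor("a_s"),
          new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 5));
      reducer.setStreamContext(context);

      List<Tuple> groups = new ArrayList<>();
      reducer.open();
      try {
        for (Tuple tuple = reducer.read(); !tuple.EOF; tuple = reducer.read()) {
          groups.add(tuple);                   // each tuple carries a "group" list of maps
        }
      } finally {
        reducer.close();
      }
      return groups;
    } finally {
      clientCache.close();
    }
  }
}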
- SolrParams sParamsA = mapParams("q", "blah", "fl", "id,a_s, a_i, a_f", "sort", "a_s asc , a_f asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - ReducerStream rstream = new ReducerStream(stream, - new FieldEqualitor("a_s"), - new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 5)); - - List tuples = getTuples(rstream); - - assertEquals(0, tuples.size()); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + //Test with spaces in the parameter lists. + SolrParams sParamsA = mapParams("q", "blah", "fl", "id,a_s, a_i, a_f", "sort", "a_s asc , a_f asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + ReducerStream rstream = new ReducerStream(stream, + new FieldEqualitor("a_s"), + new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 5)); + rstream.setStreamContext(streamContext); + List tuples = getTuples(rstream); + + assertEquals(0, tuples.size()); + } finally { + solrClientCache.close(); + } } @Test @@ -421,56 +491,65 @@ public void testParallelReducerStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParamsA = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s asc,a_f asc", "partitionKeys", "a_s"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); - ReducerStream rstream = new ReducerStream(stream, - new FieldEqualitor("a_s"), - new GroupOperation(new FieldComparator("a_f", ComparatorOrder.DESCENDING), 5)); - ParallelStream pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); - attachStreamFactory(pstream); - List tuples = getTuples(pstream); + try { + SolrParams sParamsA = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s asc,a_f asc", "partitionKeys", "a_s"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - assertEquals(3, tuples.size()); + ReducerStream rstream = new ReducerStream(stream, + new FieldEqualitor("a_s"), + new GroupOperation(new FieldComparator("a_f", ComparatorOrder.DESCENDING), 5)); + ParallelStream pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); - Tuple t0 = tuples.get(0); - List maps0 = t0.getMaps("group"); - assertMaps(maps0, 9, 1, 2, 0); + assertEquals(3, tuples.size()); - Tuple t1 = tuples.get(1); - List maps1 = t1.getMaps("group"); - assertMaps(maps1, 8, 7, 5, 3); + Tuple t0 = tuples.get(0); + List maps0 = t0.getMaps("group"); + assertMaps(maps0, 9, 1, 2, 0); - Tuple t2 = tuples.get(2); - List maps2 = t2.getMaps("group"); - assertMaps(maps2, 6, 4); + Tuple t1 = tuples.get(1); + List maps1 = t1.getMaps("group"); + assertMaps(maps1, 8, 7, 5, 3); - //Test Descending with Ascending subsort + Tuple t2 = tuples.get(2); + List maps2 = t2.getMaps("group"); + assertMaps(maps2, 6, 4); - sParamsA = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s desc,a_f asc", "partitionKeys", "a_s"); - stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + //Test Descending with 
Ascending subsort - rstream = new ReducerStream(stream, - new FieldEqualitor("a_s"), - new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 3)); - pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.DESCENDING)); - attachStreamFactory(pstream); - tuples = getTuples(pstream); + sParamsA = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s desc,a_f asc", "partitionKeys", "a_s"); + stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - assertEquals(3, tuples.size()); + rstream = new ReducerStream(stream, + new FieldEqualitor("a_s"), + new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 3)); + pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.DESCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + tuples = getTuples(pstream); - t0 = tuples.get(0); - maps0 = t0.getMaps("group"); - assertMaps(maps0, 4, 6); + assertEquals(3, tuples.size()); - t1 = tuples.get(1); - maps1 = t1.getMaps("group"); - assertMaps(maps1, 3, 5, 7); + t0 = tuples.get(0); + maps0 = t0.getMaps("group"); + assertMaps(maps0, 4, 6); - t2 = tuples.get(2); - maps2 = t2.getMaps("group"); - assertMaps(maps2, 0, 2, 1); + t1 = tuples.get(1); + maps1 = t1.getMaps("group"); + assertMaps(maps1, 3, 5, 7); + t2 = tuples.get(2); + maps2 = t2.getMaps("group"); + assertMaps(maps2, 0, 2, 1); + } finally { + solrClientCache.close(); + } } @Test @@ -490,24 +569,33 @@ public void testExceptionStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); //Test an error that comes originates from the /select handler - SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f,blah", "sort", "blah asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - ExceptionStream estream = new ExceptionStream(stream); - Tuple t = getTuple(estream); - assertTrue(t.EOF); - assertTrue(t.EXCEPTION); - assertTrue(t.getException().contains("sort param field can't be found: blah")); - - //Test an error that comes originates from the /export handler - sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f,score", "sort", "a_s asc", "qt", "/export"); - stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - estream = new ExceptionStream(stream); - t = getTuple(estream); - assertTrue(t.EOF); - assertTrue(t.EXCEPTION); - //The /export handler will pass through a real exception. 
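The testExceptionStream hunk keeps its assertions but routes the stream through the shared StreamContext. The ExceptionStream wrapper turns a failure in the inner stream into a single tuple whose EOF and EXCEPTION flags are set, with the error text available from getException(). A hedged sketch of reading that one tuple follows; it is not part of the patch, and the deliberately invalid sort field, class name, and zkHost/collection values are illustrative only.

import java.io.IOException;

import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
import org.apache.solr.client.solrj.io.stream.ExceptionStream;
import org.apache.solr.client.solrj.io.stream.StreamContext;
import org.apache.solr.common.params.ModifiableSolrParams;

public class ExceptionStreamSketch {

  // Returns the error message carried by the exception tuple, or null if the
  // wrapped stream completed without error.
  public static String firstError(String zkHost, String collection) throws IOException {
    SolrClientCache clientCache = new SolrClientCache();
    StreamContext context = new StreamContext();
    context.setSolrClientCache(clientCache);
    try {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("q", "*:*");
      params.set("fl", "id");
      params.set("sort", "bogus_field asc");   // deliberately invalid sort field

      ExceptionStream estream =
          new ExceptionStream(new CloudSolrStream(zkHost, collection, params));
      estream.setStreamContext(context);

      estream.open();
      try {
        Tuple t = estream.read();
        // On failure the wrapper emits one tuple with EOF and EXCEPTION set.
        return t.EXCEPTION ? t.getException() : null;
      } finally {
        estream.close();
      }
    } finally {
      clientCache.close();
    }
  }
}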
- assertTrue(t.getException().contains("undefined field:")); + try { + SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f,blah", "sort", "blah asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + ExceptionStream estream = new ExceptionStream(stream); + estream.setStreamContext(streamContext); + Tuple t = getTuple(estream); + assertTrue(t.EOF); + assertTrue(t.EXCEPTION); + assertTrue(t.getException().contains("sort param field can't be found: blah")); + + //Test an error that comes originates from the /export handler + sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f,score", "sort", "a_s asc", "qt", "/export"); + stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + estream = new ExceptionStream(stream); + estream.setStreamContext(streamContext); + t = getTuple(estream); + assertTrue(t.EOF); + assertTrue(t.EXCEPTION); + //The /export handler will pass through a real exception. + assertTrue(t.getException().contains("undefined field:")); + } finally { + solrClientCache.close(); + } } @Test @@ -577,48 +665,55 @@ public void testStatsStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParamsA = mapParams("q", "*:*"); - - Metric[] metrics = {new SumMetric("a_i"), - new SumMetric("a_f"), - new MinMetric("a_i"), - new MinMetric("a_f"), - new MaxMetric("a_i"), - new MaxMetric("a_f"), - new MeanMetric("a_i"), - new MeanMetric("a_f"), - new CountMetric()}; - - StatsStream statsStream = new StatsStream(zkHost, COLLECTIONORALIAS, sParamsA, metrics); - - List tuples = getTuples(statsStream); - - assertEquals(1, tuples.size()); - - //Test Long and Double Sums - - Tuple tuple = tuples.get(0); - - Double sumi = tuple.getDouble("sum(a_i)"); - Double sumf = tuple.getDouble("sum(a_f)"); - Double mini = tuple.getDouble("min(a_i)"); - Double minf = tuple.getDouble("min(a_f)"); - Double maxi = tuple.getDouble("max(a_i)"); - Double maxf = tuple.getDouble("max(a_f)"); - Double avgi = tuple.getDouble("avg(a_i)"); - Double avgf = tuple.getDouble("avg(a_f)"); - Double count = tuple.getDouble("count(*)"); - - assertEquals(70, sumi.longValue()); - assertEquals(55.0, sumf.doubleValue(), 0.01); - assertEquals(0.0, mini.doubleValue(), 0.01); - assertEquals(1.0, minf.doubleValue(), 0.01); - assertEquals(14.0, maxi.doubleValue(), 0.01); - assertEquals(10.0, maxf.doubleValue(), 0.01); - assertEquals(7.0, avgi.doubleValue(), .01); - assertEquals(5.5, avgf.doubleValue(), .001); - assertEquals(10, count.doubleValue(), .01); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + SolrParams sParamsA = mapParams("q", "*:*"); + + Metric[] metrics = {new SumMetric("a_i"), + new SumMetric("a_f"), + new MinMetric("a_i"), + new MinMetric("a_f"), + new MaxMetric("a_i"), + new MaxMetric("a_f"), + new MeanMetric("a_i"), + new MeanMetric("a_f"), + new CountMetric()}; + + StatsStream statsStream = new StatsStream(zkHost, COLLECTIONORALIAS, sParamsA, metrics); + statsStream.setStreamContext(streamContext); + List tuples = getTuples(statsStream); + + assertEquals(1, tuples.size()); + + //Test Long and Double Sums + + Tuple tuple = tuples.get(0); + + Double sumi = tuple.getDouble("sum(a_i)"); + Double sumf = tuple.getDouble("sum(a_f)"); + Double mini = tuple.getDouble("min(a_i)"); + Double minf = tuple.getDouble("min(a_f)"); + Double maxi = 
tuple.getDouble("max(a_i)"); + Double maxf = tuple.getDouble("max(a_f)"); + Double avgi = tuple.getDouble("avg(a_i)"); + Double avgf = tuple.getDouble("avg(a_f)"); + Double count = tuple.getDouble("count(*)"); + + assertEquals(70, sumi.longValue()); + assertEquals(55.0, sumf.doubleValue(), 0.01); + assertEquals(0.0, mini.doubleValue(), 0.01); + assertEquals(1.0, minf.doubleValue(), 0.01); + assertEquals(14.0, maxi.doubleValue(), 0.01); + assertEquals(10.0, maxf.doubleValue(), 0.01); + assertEquals(7.0, avgi.doubleValue(), .01); + assertEquals(5.5, avgf.doubleValue(), .001); + assertEquals(10, count.doubleValue(), .01); + } finally { + solrClientCache.close(); + } } @Test @@ -637,344 +732,352 @@ public void testFacetStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc"); - - Bucket[] buckets = {new Bucket("a_s")}; - - Metric[] metrics = {new SumMetric("a_i"), - new SumMetric("a_f"), - new MinMetric("a_i"), - new MinMetric("a_f"), - new MaxMetric("a_i"), - new MaxMetric("a_f"), - new MeanMetric("a_i"), - new MeanMetric("a_f"), - new CountMetric()}; - - FieldComparator[] sorts = {new FieldComparator("sum(a_i)", - ComparatorOrder.ASCENDING)}; - - FacetStream facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); - - List tuples = getTuples(facetStream); - - assert(tuples.size() == 3); - - //Test Long and Double Sums - - Tuple tuple = tuples.get(0); - String bucket = tuple.getString("a_s"); - Double sumi = tuple.getDouble("sum(a_i)"); - Double sumf = tuple.getDouble("sum(a_f)"); - Double mini = tuple.getDouble("min(a_i)"); - Double minf = tuple.getDouble("min(a_f)"); - Double maxi = tuple.getDouble("max(a_i)"); - Double maxf = tuple.getDouble("max(a_f)"); - Double avgi = tuple.getDouble("avg(a_i)"); - Double avgf = tuple.getDouble("avg(a_f)"); - Double count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket); - assertEquals(15, sumi.longValue()); - assertEquals(11.0, sumf.doubleValue(), 0.01); - assertEquals(4.0, mini.doubleValue(), 0.01); - assertEquals(4.0, minf.doubleValue(), 0.01); - assertEquals(11.0, maxi.doubleValue(), 0.01); - assertEquals(7.0, maxf.doubleValue(), 0.01); - assertEquals(7.5, avgi.doubleValue(), 0.01); - assertEquals(5.5, avgf.doubleValue(), 0.01); - assertEquals(2, count.doubleValue(), 0.01); - - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket); - assertEquals(17, sumi.doubleValue(), .01); - assertEquals(18, sumf.doubleValue(), .01); - assertEquals(0.0, mini.doubleValue(), .01); - assertEquals(1.0, minf.doubleValue(), .01); - assertEquals(14.0, maxi.doubleValue(), .01); - assertEquals(10.0, maxf.doubleValue(), .01); - assertEquals(4.25, avgi.doubleValue(), .01); - assertEquals(4.5, avgf.doubleValue(), .01); - assertEquals(4, count.doubleValue(), .01); - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi 
= tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket); - assertEquals(38.0, sumi.doubleValue(), 0.01); - assertEquals(26.0, sumf.doubleValue(), 0.01); - assertEquals(3.0, mini.doubleValue(), 0.01); - assertEquals(3.0, minf.doubleValue(), 0.01); - assertEquals(13.0, maxi.doubleValue(), 0.01); - assertEquals(9.0, maxf.doubleValue(), 0.01); - assertEquals(9.5, avgi.doubleValue(), 0.01); - assertEquals(6.5, avgf.doubleValue(), 0.01); - assertEquals(4, count.doubleValue(), 0.01); - - - //Reverse the Sort. - - sorts[0] = new FieldComparator("sum(a_i)", ComparatorOrder.DESCENDING); - - facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); - - tuples = getTuples(facetStream); - - assertEquals(3, tuples.size()); - - //Test Long and Double Sums - - tuple = tuples.get(0); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket); - assertEquals(38, sumi.doubleValue(), 0.1); - assertEquals(26, sumf.doubleValue(), 0.1); - assertEquals(3, mini.doubleValue(), 0.1); - assertEquals(3, minf.doubleValue(), 0.1); - assertEquals(13, maxi.doubleValue(), 0.1); - assertEquals(9, maxf.doubleValue(), 0.1); - assertEquals(9.5, avgi.doubleValue(), 0.1); - assertEquals(6.5, avgf.doubleValue(), 0.1); - assertEquals(4, count.doubleValue(), 0.1); - - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket); - assertEquals(17, sumi.doubleValue(), 0.01); - assertEquals(18, sumf.doubleValue(), 0.01); - assertEquals(0, mini.doubleValue(), 0.01); - assertEquals(1, minf.doubleValue(), 0.01); - assertEquals(14, maxi.doubleValue(), 0.01); - assertEquals(10, maxf.doubleValue(), 0.01); - assertEquals(4.25, avgi.doubleValue(), 0.01); - assertEquals(4.5, avgf.doubleValue(), 0.01); - assertEquals(4, count.doubleValue(), 0.01); - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket); - assertEquals(15, sumi.longValue()); - assertEquals(11, sumf.doubleValue(), 0.01); - assertEquals(4.0, mini.doubleValue(), 0.01); - assertEquals(4.0, minf.doubleValue(), 0.01); - assertEquals(11.0, maxi.doubleValue(), 0.01); - assertEquals(7.0, maxf.doubleValue(), 0.01); - assertEquals(7.5, avgi.doubleValue(), 0.01); - assertEquals(5.5, avgf.doubleValue(), 0.01); - assertEquals(2, count.doubleValue(), 0.01); - - - //Test index sort - - sorts[0] = new 
FieldComparator("a_s", ComparatorOrder.DESCENDING); - - - facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); - - tuples = getTuples(facetStream); - - assertEquals(3, tuples.size()); - - - tuple = tuples.get(0); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - - assertEquals("hello4", bucket); - assertEquals(15, sumi.longValue()); - assertEquals(11, sumf.doubleValue(), 0.01); - assertEquals(4, mini.doubleValue(), 0.01); - assertEquals(4, minf.doubleValue(), 0.01); - assertEquals(11, maxi.doubleValue(), 0.01); - assertEquals(7, maxf.doubleValue(), 0.01); - assertEquals(7.5, avgi.doubleValue(), 0.01); - assertEquals(5.5, avgf.doubleValue(), 0.01); - assertEquals(2, count.doubleValue(), 0.01); - - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertTrue(bucket.equals("hello3")); - assertTrue(sumi.doubleValue() == 38.0D); - assertTrue(sumf.doubleValue() == 26.0D); - assertTrue(mini.doubleValue() == 3.0D); - assertTrue(minf.doubleValue() == 3.0D); - assertTrue(maxi.doubleValue() == 13.0D); - assertTrue(maxf.doubleValue() == 9.0D); - assertTrue(avgi.doubleValue() == 9.5D); - assertTrue(avgf.doubleValue() == 6.5D); - assertTrue(count.doubleValue() == 4); - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket); - assertEquals(17, sumi.doubleValue(), 0.01); - assertEquals(18, sumf.doubleValue(), 0.01); - assertEquals(0, mini.doubleValue(), 0.01); - assertEquals(1, minf.doubleValue(), 0.01); - assertEquals(14, maxi.doubleValue(), 0.01); - assertEquals(10, maxf.doubleValue(), 0.01); - assertEquals(4.25, avgi.doubleValue(), 0.01); - assertEquals(4.5, avgf.doubleValue(), 0.01); - assertEquals(4, count.doubleValue(), 0.01); - - //Test index sort - - sorts[0] = new FieldComparator("a_s", ComparatorOrder.ASCENDING); - - facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); - - tuples = getTuples(facetStream); - - assertEquals(3, tuples.size()); - - tuple = tuples.get(0); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket); - assertEquals(17, sumi.doubleValue(), 0.01); - assertEquals(18, sumf.doubleValue(), 
0.01); - assertEquals(0, mini.doubleValue(), 0.01); - assertEquals(1, minf.doubleValue(), 0.01); - assertEquals(14, maxi.doubleValue(), 0.01); - assertEquals(10, maxf.doubleValue(), 0.01); - assertEquals(4.25, avgi.doubleValue(), 0.0001); - assertEquals(4.5, avgf.doubleValue(), 0.001); - assertEquals(4, count.doubleValue(), 0.01); - - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket); - assertEquals(38, sumi.doubleValue(), 0.01); - assertEquals(26, sumf.doubleValue(), 0.01); - assertEquals(3, mini.doubleValue(), 0.01); - assertEquals(3, minf.doubleValue(), 0.01); - assertEquals(13, maxi.doubleValue(), 0.01); - assertEquals(9, maxf.doubleValue(), 0.01); - assertEquals(9.5, avgi.doubleValue(), 0.01); - assertEquals(6.5, avgf.doubleValue(), 0.01); - assertEquals(4, count.doubleValue(), 0.01); - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket); - assertEquals(15, sumi.longValue()); - assertEquals(11.0, sumf.doubleValue(), 0.1); - assertEquals(4.0, mini.doubleValue(), 0.1); - assertEquals(4.0, minf.doubleValue(), 0.1); - assertEquals(11.0, maxi.doubleValue(), 0.1); - assertEquals(7.0, maxf.doubleValue(), 0.1); - assertEquals(7.5, avgi.doubleValue(), 0.1); - assertEquals(5.5, avgf.doubleValue(), 0.1); - assertEquals(2, count.doubleValue(), 0.1); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc"); + + Bucket[] buckets = {new Bucket("a_s")}; + + Metric[] metrics = {new SumMetric("a_i"), + new SumMetric("a_f"), + new MinMetric("a_i"), + new MinMetric("a_f"), + new MaxMetric("a_i"), + new MaxMetric("a_f"), + new MeanMetric("a_i"), + new MeanMetric("a_f"), + new CountMetric()}; + + FieldComparator[] sorts = {new FieldComparator("sum(a_i)", + ComparatorOrder.ASCENDING)}; + + FacetStream facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); + + List tuples = getTuples(facetStream); + + assert (tuples.size() == 3); + + //Test Long and Double Sums + + Tuple tuple = tuples.get(0); + String bucket = tuple.getString("a_s"); + Double sumi = tuple.getDouble("sum(a_i)"); + Double sumf = tuple.getDouble("sum(a_f)"); + Double mini = tuple.getDouble("min(a_i)"); + Double minf = tuple.getDouble("min(a_f)"); + Double maxi = tuple.getDouble("max(a_i)"); + Double maxf = tuple.getDouble("max(a_f)"); + Double avgi = tuple.getDouble("avg(a_i)"); + Double avgf = tuple.getDouble("avg(a_f)"); + Double count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket); + assertEquals(15, sumi.longValue()); + assertEquals(11.0, sumf.doubleValue(), 0.01); + assertEquals(4.0, mini.doubleValue(), 0.01); + 
assertEquals(4.0, minf.doubleValue(), 0.01); + assertEquals(11.0, maxi.doubleValue(), 0.01); + assertEquals(7.0, maxf.doubleValue(), 0.01); + assertEquals(7.5, avgi.doubleValue(), 0.01); + assertEquals(5.5, avgf.doubleValue(), 0.01); + assertEquals(2, count.doubleValue(), 0.01); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket); + assertEquals(17, sumi.doubleValue(), .01); + assertEquals(18, sumf.doubleValue(), .01); + assertEquals(0.0, mini.doubleValue(), .01); + assertEquals(1.0, minf.doubleValue(), .01); + assertEquals(14.0, maxi.doubleValue(), .01); + assertEquals(10.0, maxf.doubleValue(), .01); + assertEquals(4.25, avgi.doubleValue(), .01); + assertEquals(4.5, avgf.doubleValue(), .01); + assertEquals(4, count.doubleValue(), .01); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket); + assertEquals(38.0, sumi.doubleValue(), 0.01); + assertEquals(26.0, sumf.doubleValue(), 0.01); + assertEquals(3.0, mini.doubleValue(), 0.01); + assertEquals(3.0, minf.doubleValue(), 0.01); + assertEquals(13.0, maxi.doubleValue(), 0.01); + assertEquals(9.0, maxf.doubleValue(), 0.01); + assertEquals(9.5, avgi.doubleValue(), 0.01); + assertEquals(6.5, avgf.doubleValue(), 0.01); + assertEquals(4, count.doubleValue(), 0.01); + + + //Reverse the Sort. 
+ + sorts[0] = new FieldComparator("sum(a_i)", ComparatorOrder.DESCENDING); + + facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); + + tuples = getTuples(facetStream); + + assertEquals(3, tuples.size()); + + //Test Long and Double Sums + + tuple = tuples.get(0); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket); + assertEquals(38, sumi.doubleValue(), 0.1); + assertEquals(26, sumf.doubleValue(), 0.1); + assertEquals(3, mini.doubleValue(), 0.1); + assertEquals(3, minf.doubleValue(), 0.1); + assertEquals(13, maxi.doubleValue(), 0.1); + assertEquals(9, maxf.doubleValue(), 0.1); + assertEquals(9.5, avgi.doubleValue(), 0.1); + assertEquals(6.5, avgf.doubleValue(), 0.1); + assertEquals(4, count.doubleValue(), 0.1); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket); + assertEquals(17, sumi.doubleValue(), 0.01); + assertEquals(18, sumf.doubleValue(), 0.01); + assertEquals(0, mini.doubleValue(), 0.01); + assertEquals(1, minf.doubleValue(), 0.01); + assertEquals(14, maxi.doubleValue(), 0.01); + assertEquals(10, maxf.doubleValue(), 0.01); + assertEquals(4.25, avgi.doubleValue(), 0.01); + assertEquals(4.5, avgf.doubleValue(), 0.01); + assertEquals(4, count.doubleValue(), 0.01); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket); + assertEquals(15, sumi.longValue()); + assertEquals(11, sumf.doubleValue(), 0.01); + assertEquals(4.0, mini.doubleValue(), 0.01); + assertEquals(4.0, minf.doubleValue(), 0.01); + assertEquals(11.0, maxi.doubleValue(), 0.01); + assertEquals(7.0, maxf.doubleValue(), 0.01); + assertEquals(7.5, avgi.doubleValue(), 0.01); + assertEquals(5.5, avgf.doubleValue(), 0.01); + assertEquals(2, count.doubleValue(), 0.01); + + + //Test index sort + + sorts[0] = new FieldComparator("a_s", ComparatorOrder.DESCENDING); + + + facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); + facetStream.setStreamContext(streamContext); + + tuples = getTuples(facetStream); + + assertEquals(3, tuples.size()); + + + tuple = tuples.get(0); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = 
tuple.getDouble("count(*)"); + + + assertEquals("hello4", bucket); + assertEquals(15, sumi.longValue()); + assertEquals(11, sumf.doubleValue(), 0.01); + assertEquals(4, mini.doubleValue(), 0.01); + assertEquals(4, minf.doubleValue(), 0.01); + assertEquals(11, maxi.doubleValue(), 0.01); + assertEquals(7, maxf.doubleValue(), 0.01); + assertEquals(7.5, avgi.doubleValue(), 0.01); + assertEquals(5.5, avgf.doubleValue(), 0.01); + assertEquals(2, count.doubleValue(), 0.01); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertTrue(bucket.equals("hello3")); + assertTrue(sumi.doubleValue() == 38.0D); + assertTrue(sumf.doubleValue() == 26.0D); + assertTrue(mini.doubleValue() == 3.0D); + assertTrue(minf.doubleValue() == 3.0D); + assertTrue(maxi.doubleValue() == 13.0D); + assertTrue(maxf.doubleValue() == 9.0D); + assertTrue(avgi.doubleValue() == 9.5D); + assertTrue(avgf.doubleValue() == 6.5D); + assertTrue(count.doubleValue() == 4); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket); + assertEquals(17, sumi.doubleValue(), 0.01); + assertEquals(18, sumf.doubleValue(), 0.01); + assertEquals(0, mini.doubleValue(), 0.01); + assertEquals(1, minf.doubleValue(), 0.01); + assertEquals(14, maxi.doubleValue(), 0.01); + assertEquals(10, maxf.doubleValue(), 0.01); + assertEquals(4.25, avgi.doubleValue(), 0.01); + assertEquals(4.5, avgf.doubleValue(), 0.01); + assertEquals(4, count.doubleValue(), 0.01); + + //Test index sort + + sorts[0] = new FieldComparator("a_s", ComparatorOrder.ASCENDING); + + facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, 100); + facetStream.setStreamContext(streamContext); + tuples = getTuples(facetStream); + + assertEquals(3, tuples.size()); + + tuple = tuples.get(0); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket); + assertEquals(17, sumi.doubleValue(), 0.01); + assertEquals(18, sumf.doubleValue(), 0.01); + assertEquals(0, mini.doubleValue(), 0.01); + assertEquals(1, minf.doubleValue(), 0.01); + assertEquals(14, maxi.doubleValue(), 0.01); + assertEquals(10, maxf.doubleValue(), 0.01); + assertEquals(4.25, avgi.doubleValue(), 0.0001); + assertEquals(4.5, avgf.doubleValue(), 0.001); + assertEquals(4, count.doubleValue(), 0.01); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = 
tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket); + assertEquals(38, sumi.doubleValue(), 0.01); + assertEquals(26, sumf.doubleValue(), 0.01); + assertEquals(3, mini.doubleValue(), 0.01); + assertEquals(3, minf.doubleValue(), 0.01); + assertEquals(13, maxi.doubleValue(), 0.01); + assertEquals(9, maxf.doubleValue(), 0.01); + assertEquals(9.5, avgi.doubleValue(), 0.01); + assertEquals(6.5, avgf.doubleValue(), 0.01); + assertEquals(4, count.doubleValue(), 0.01); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket); + assertEquals(15, sumi.longValue()); + assertEquals(11.0, sumf.doubleValue(), 0.1); + assertEquals(4.0, mini.doubleValue(), 0.1); + assertEquals(4.0, minf.doubleValue(), 0.1); + assertEquals(11.0, maxi.doubleValue(), 0.1); + assertEquals(7.0, maxf.doubleValue(), 0.1); + assertEquals(7.5, avgi.doubleValue(), 0.1); + assertEquals(5.5, avgf.doubleValue(), 0.1); + assertEquals(2, count.doubleValue(), 0.1); + } finally { + solrClientCache.close(); + } } @@ -1042,7 +1145,11 @@ private void checkSort(JettySolrRunner jetty, String field, String sortDir, Stri List selectOrder = ("asc".equals(sortDir)) ? Arrays.asList(ascOrder) : Arrays.asList(descOrder); List selectOrderBool = ("asc".equals(sortDir)) ? Arrays.asList(ascOrderBool) : Arrays.asList(descOrderBool); SolrParams exportParams = mapParams("q", "*:*", "qt", "/export", "fl", "id," + field, "sort", field + " " + sortDir + ",id asc"); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); try (CloudSolrStream solrStream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, exportParams)) { + solrStream.setStreamContext(streamContext); List tuples = getTuples(solrStream); assertEquals("There should be exactly 32 responses returned", 32, tuples.size()); // Since the getTuples method doesn't return the EOF tuple, these two entries should be the same size. @@ -1053,6 +1160,8 @@ private void checkSort(JettySolrRunner jetty, String field, String sortDir, Stri "' RESTORE GETTING selectOrder from select statement after LUCENE-7548", tuples.get(idx).getString("id"), (field.startsWith("b_") ? 
selectOrderBool.get(idx) : selectOrder.get(idx))); } + } finally { + solrClientCache.close(); } } @@ -1081,7 +1190,12 @@ private void checkReturnValsForEmpty(String[] fields) throws IOException { } SolrParams sParams = mapParams("q", "*:*", "qt", "/export", "fl", fl.toString(), "sort", "id asc"); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try (CloudSolrStream solrStream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams)) { + solrStream.setStreamContext(streamContext); List tuples = getTuples(solrStream); assertEquals("There should be exactly 32 responses returned", 32, tuples.size()); @@ -1097,6 +1211,8 @@ private void checkReturnValsForEmpty(String[] fields) throws IOException { } } } + } finally { + solrClientCache.close(); } } @@ -1229,173 +1345,181 @@ public void testSubFacetStream() throws Exception { .add(id, "9", "level1_s", "hello0", "level2_s", "b", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_i,a_f"); - - Bucket[] buckets = {new Bucket("level1_s"), new Bucket("level2_s")}; - - Metric[] metrics = {new SumMetric("a_i"), - new CountMetric()}; - - FieldComparator[] sorts = {new FieldComparator("sum(a_i)", ComparatorOrder.DESCENDING), new FieldComparator("sum(a_i)", ComparatorOrder.DESCENDING)}; - - FacetStream facetStream = new FacetStream( - zkHost, - COLLECTIONORALIAS, - sParamsA, - buckets, - metrics, - sorts, - 100); - - List tuples = getTuples(facetStream); - assertEquals(6, tuples.size()); - - Tuple tuple = tuples.get(0); - String bucket1 = tuple.getString("level1_s"); - String bucket2 = tuple.getString("level2_s"); - Double sumi = tuple.getDouble("sum(a_i)"); - Double count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket1); - assertEquals("b", bucket2); - assertEquals(35, sumi.longValue()); - assertEquals(3, count, 0.1); - - tuple = tuples.get(1); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket1); - assertEquals("b", bucket2); - assertEquals(15, sumi.longValue()); - assertEquals(2, count, 0.1); - - tuple = tuples.get(2); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket1); - assertEquals("b", bucket2); - assertEquals(11, sumi.longValue()); - assertEquals(1, count.doubleValue(), 0.1); - - tuple = tuples.get(3); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket1); - assertEquals("a", bucket2); - assertEquals(4, sumi.longValue()); - assertEquals(1, count.doubleValue(), 0.1); - - tuple = tuples.get(4); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket1); - assertEquals("a", bucket2); - assertEquals(3, sumi.longValue()); - assertEquals(1, count.doubleValue(), 0.1); - - tuple = tuples.get(5); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket1); - 
assertEquals("a", bucket2); - assertEquals(2, sumi.longValue()); - assertEquals(2, count.doubleValue(), 0.1); - - sorts[0] = new FieldComparator("level1_s", ComparatorOrder.DESCENDING ); - sorts[1] = new FieldComparator("level2_s", ComparatorOrder.DESCENDING ); - facetStream = new FacetStream( - zkHost, - COLLECTIONORALIAS, - sParamsA, - buckets, - metrics, - sorts, - 100); - - tuples = getTuples(facetStream); - assertEquals(6, tuples.size()); - - tuple = tuples.get(0); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket1); - assertEquals("b", bucket2); - assertEquals(11, sumi.longValue()); - assertEquals(1, count, 0.1); - - tuple = tuples.get(1); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket1); - assertEquals("a", bucket2); - assertEquals(4, sumi.longValue()); - assertEquals(1, count.doubleValue(), 0.1); - - tuple = tuples.get(2); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket1); - assertEquals("b", bucket2); - assertEquals(35, sumi.longValue()); - assertEquals(3, count.doubleValue(), 0.1); - - tuple = tuples.get(3); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket1); - assertEquals("a", bucket2); - assertEquals(3, sumi.longValue()); - assertEquals(1, count.doubleValue(), 0.1); - - tuple = tuples.get(4); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket1); - assertEquals("b", bucket2); - assertEquals(15, sumi.longValue()); - assertEquals(2, count.doubleValue(), 0.1); - - tuple = tuples.get(5); - bucket1 = tuple.getString("level1_s"); - bucket2 = tuple.getString("level2_s"); - sumi = tuple.getDouble("sum(a_i)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket1); - assertEquals("a", bucket2); - assertEquals(2, sumi.longValue()); - assertEquals(2, count.doubleValue(), 0.1); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + + SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_i,a_f"); + + Bucket[] buckets = {new Bucket("level1_s"), new Bucket("level2_s")}; + + Metric[] metrics = {new SumMetric("a_i"), + new CountMetric()}; + + FieldComparator[] sorts = {new FieldComparator("sum(a_i)", ComparatorOrder.DESCENDING), new FieldComparator("sum(a_i)", ComparatorOrder.DESCENDING)}; + + FacetStream facetStream = new FacetStream( + zkHost, + COLLECTIONORALIAS, + sParamsA, + buckets, + metrics, + sorts, + 100); + facetStream.setStreamContext(streamContext); + List tuples = getTuples(facetStream); + assertEquals(6, tuples.size()); + + Tuple tuple = tuples.get(0); + String bucket1 = tuple.getString("level1_s"); + String bucket2 = tuple.getString("level2_s"); + Double sumi = tuple.getDouble("sum(a_i)"); + Double count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket1); + assertEquals("b", bucket2); + assertEquals(35, 
sumi.longValue()); + assertEquals(3, count, 0.1); + + tuple = tuples.get(1); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket1); + assertEquals("b", bucket2); + assertEquals(15, sumi.longValue()); + assertEquals(2, count, 0.1); + + tuple = tuples.get(2); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket1); + assertEquals("b", bucket2); + assertEquals(11, sumi.longValue()); + assertEquals(1, count.doubleValue(), 0.1); + + tuple = tuples.get(3); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket1); + assertEquals("a", bucket2); + assertEquals(4, sumi.longValue()); + assertEquals(1, count.doubleValue(), 0.1); + + tuple = tuples.get(4); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket1); + assertEquals("a", bucket2); + assertEquals(3, sumi.longValue()); + assertEquals(1, count.doubleValue(), 0.1); + + tuple = tuples.get(5); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket1); + assertEquals("a", bucket2); + assertEquals(2, sumi.longValue()); + assertEquals(2, count.doubleValue(), 0.1); + + sorts[0] = new FieldComparator("level1_s", ComparatorOrder.DESCENDING); + sorts[1] = new FieldComparator("level2_s", ComparatorOrder.DESCENDING); + facetStream = new FacetStream( + zkHost, + COLLECTIONORALIAS, + sParamsA, + buckets, + metrics, + sorts, + 100); + facetStream.setStreamContext(streamContext); + tuples = getTuples(facetStream); + assertEquals(6, tuples.size()); + + tuple = tuples.get(0); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket1); + assertEquals("b", bucket2); + assertEquals(11, sumi.longValue()); + assertEquals(1, count, 0.1); + + tuple = tuples.get(1); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket1); + assertEquals("a", bucket2); + assertEquals(4, sumi.longValue()); + assertEquals(1, count.doubleValue(), 0.1); + + tuple = tuples.get(2); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket1); + assertEquals("b", bucket2); + assertEquals(35, sumi.longValue()); + assertEquals(3, count.doubleValue(), 0.1); + + tuple = tuples.get(3); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket1); + assertEquals("a", bucket2); + assertEquals(3, sumi.longValue()); + assertEquals(1, count.doubleValue(), 0.1); + + tuple = tuples.get(4); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = 
tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket1); + assertEquals("b", bucket2); + assertEquals(15, sumi.longValue()); + assertEquals(2, count.doubleValue(), 0.1); + + tuple = tuples.get(5); + bucket1 = tuple.getString("level1_s"); + bucket2 = tuple.getString("level2_s"); + sumi = tuple.getDouble("sum(a_i)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket1); + assertEquals("a", bucket2); + assertEquals(2, sumi.longValue()); + assertEquals(2, count.doubleValue(), 0.1); + } finally { + solrClientCache.close(); + } } @Test @@ -1413,166 +1537,174 @@ public void testRollupStream() throws Exception { .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9") .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - - SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - - Bucket[] buckets = {new Bucket("a_s")}; - - Metric[] metrics = {new SumMetric("a_i"), - new SumMetric("a_f"), - new MinMetric("a_i"), - new MinMetric("a_f"), - new MaxMetric("a_i"), - new MaxMetric("a_f"), - new MeanMetric("a_i"), - new MeanMetric("a_f"), - new CountMetric()}; - - RollupStream rollupStream = new RollupStream(stream, buckets, metrics); - List tuples = getTuples(rollupStream); - - assert(tuples.size() == 3); - - //Test Long and Double Sums - - Tuple tuple = tuples.get(0); - String bucket = tuple.getString("a_s"); - Double sumi = tuple.getDouble("sum(a_i)"); - Double sumf = tuple.getDouble("sum(a_f)"); - Double mini = tuple.getDouble("min(a_i)"); - Double minf = tuple.getDouble("min(a_f)"); - Double maxi = tuple.getDouble("max(a_i)"); - Double maxf = tuple.getDouble("max(a_f)"); - Double avgi = tuple.getDouble("avg(a_i)"); - Double avgf = tuple.getDouble("avg(a_f)"); - Double count = tuple.getDouble("count(*)"); - - - assertEquals("hello0", bucket); - assertEquals(17, sumi.doubleValue(), 0.001); - assertEquals(18, sumf.doubleValue(), 0.001); - assertEquals(0, mini.doubleValue(), 0.001); - assertEquals(1, minf.doubleValue(), 0.001); - assertEquals(14, maxi.doubleValue(), 0.001); - assertEquals(10, maxf.doubleValue(), 0.001); - assertEquals(4.25, avgi.doubleValue(), 0.001); - assertEquals(4.5, avgf.doubleValue(), 0.001); - assertEquals(4, count.doubleValue(), 0.001); - - - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket); - assertEquals(38, sumi.doubleValue(), 0.001); - assertEquals(26, sumf.doubleValue(), 0.001); - assertEquals(3, mini.doubleValue(), 0.001); - assertEquals(3, minf.doubleValue(), 0.001); - assertEquals(13, maxi.doubleValue(), 0.001); - assertEquals(9, maxf.doubleValue(), 0.001); - assertEquals(9.5, avgi.doubleValue(), 0.001); - assertEquals(6.5, avgf.doubleValue(), 0.001); - assertEquals(4, count.doubleValue(), 0.001); - - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf 
= tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket); - assertEquals(15, sumi.longValue()); - assertEquals(11, sumf.doubleValue(), 0.01); - assertEquals(4, mini.doubleValue(), 0.01); - assertEquals(4, minf.doubleValue(), 0.01); - assertEquals(11, maxi.doubleValue(), 0.01); - assertEquals(7, maxf.doubleValue(), 0.01); - assertEquals(7.5, avgi.doubleValue(), 0.01); - assertEquals(5.5, avgf.doubleValue(), 0.01); - assertEquals(2, count.doubleValue(), 0.01); - - // Test will null metrics - rollupStream = new RollupStream(stream, buckets, metrics); - tuples = getTuples(rollupStream); - - assert(tuples.size() == 3); - tuple = tuples.get(0); - bucket = tuple.getString("a_s"); - assertTrue(bucket.equals("hello0")); - - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - assertTrue(bucket.equals("hello3")); - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - assertTrue(bucket.equals("hello4")); - - - //Test will null value in the grouping field - new UpdateRequest() - .add(id, "12", "a_s", null, "a_i", "14", "a_f", "10") - .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - - sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc", "qt", "/export"); - stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - - Bucket[] buckets1 = {new Bucket("a_s")}; - - Metric[] metrics1 = {new SumMetric("a_i"), - new SumMetric("a_f"), - new MinMetric("a_i"), - new MinMetric("a_f"), - new MaxMetric("a_i"), - new MaxMetric("a_f"), - new MeanMetric("a_i"), - new MeanMetric("a_f"), - new CountMetric()}; - - rollupStream = new RollupStream(stream, buckets1, metrics1); - tuples = getTuples(rollupStream); - //Check that we've got the extra NULL bucket - assertEquals(4, tuples.size()); - tuple = tuples.get(0); - assertEquals("NULL", tuple.getString("a_s")); - - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals(14, sumi.doubleValue(), 0.01); - assertEquals(10, sumf.doubleValue(), 0.01); - assertEquals(14, mini.doubleValue(), 0.01); - assertEquals(10, minf.doubleValue(), 0.01); - assertEquals(14, maxi.doubleValue(), 0.01); - assertEquals(10, maxf.doubleValue(), 0.01); - assertEquals(14, avgi.doubleValue(), 0.01); - assertEquals(10, avgf.doubleValue(), 0.01); - assertEquals(1, count.doubleValue(), 0.01); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + + Bucket[] buckets = {new Bucket("a_s")}; + + Metric[] metrics = {new SumMetric("a_i"), + new SumMetric("a_f"), + new MinMetric("a_i"), + new MinMetric("a_f"), + new MaxMetric("a_i"), + new MaxMetric("a_f"), + new MeanMetric("a_i"), + new MeanMetric("a_f"), + new CountMetric()}; + + RollupStream rollupStream = new RollupStream(stream, buckets, metrics); + rollupStream.setStreamContext(streamContext); + List tuples = getTuples(rollupStream); + + assert (tuples.size() == 3); + + //Test Long and Double 
Sums + + Tuple tuple = tuples.get(0); + String bucket = tuple.getString("a_s"); + Double sumi = tuple.getDouble("sum(a_i)"); + Double sumf = tuple.getDouble("sum(a_f)"); + Double mini = tuple.getDouble("min(a_i)"); + Double minf = tuple.getDouble("min(a_f)"); + Double maxi = tuple.getDouble("max(a_i)"); + Double maxf = tuple.getDouble("max(a_f)"); + Double avgi = tuple.getDouble("avg(a_i)"); + Double avgf = tuple.getDouble("avg(a_f)"); + Double count = tuple.getDouble("count(*)"); + + + assertEquals("hello0", bucket); + assertEquals(17, sumi.doubleValue(), 0.001); + assertEquals(18, sumf.doubleValue(), 0.001); + assertEquals(0, mini.doubleValue(), 0.001); + assertEquals(1, minf.doubleValue(), 0.001); + assertEquals(14, maxi.doubleValue(), 0.001); + assertEquals(10, maxf.doubleValue(), 0.001); + assertEquals(4.25, avgi.doubleValue(), 0.001); + assertEquals(4.5, avgf.doubleValue(), 0.001); + assertEquals(4, count.doubleValue(), 0.001); + + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket); + assertEquals(38, sumi.doubleValue(), 0.001); + assertEquals(26, sumf.doubleValue(), 0.001); + assertEquals(3, mini.doubleValue(), 0.001); + assertEquals(3, minf.doubleValue(), 0.001); + assertEquals(13, maxi.doubleValue(), 0.001); + assertEquals(9, maxf.doubleValue(), 0.001); + assertEquals(9.5, avgi.doubleValue(), 0.001); + assertEquals(6.5, avgf.doubleValue(), 0.001); + assertEquals(4, count.doubleValue(), 0.001); + + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket); + assertEquals(15, sumi.longValue()); + assertEquals(11, sumf.doubleValue(), 0.01); + assertEquals(4, mini.doubleValue(), 0.01); + assertEquals(4, minf.doubleValue(), 0.01); + assertEquals(11, maxi.doubleValue(), 0.01); + assertEquals(7, maxf.doubleValue(), 0.01); + assertEquals(7.5, avgi.doubleValue(), 0.01); + assertEquals(5.5, avgf.doubleValue(), 0.01); + assertEquals(2, count.doubleValue(), 0.01); + + // Test with null metrics + rollupStream = new RollupStream(stream, buckets, metrics); + rollupStream.setStreamContext(streamContext); + tuples = getTuples(rollupStream); + + assert (tuples.size() == 3); + tuple = tuples.get(0); + bucket = tuple.getString("a_s"); + assertTrue(bucket.equals("hello0")); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + assertTrue(bucket.equals("hello3")); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + assertTrue(bucket.equals("hello4")); + + + //Test with null value in the grouping field + new UpdateRequest() + .add(id, "12", "a_s", null, "a_i", "14", "a_f", "10") + .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + + sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc", "qt", "/export"); + stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + Bucket[] buckets1 = {new
Bucket("a_s")}; + + Metric[] metrics1 = {new SumMetric("a_i"), + new SumMetric("a_f"), + new MinMetric("a_i"), + new MinMetric("a_f"), + new MaxMetric("a_i"), + new MaxMetric("a_f"), + new MeanMetric("a_i"), + new MeanMetric("a_f"), + new CountMetric()}; + + rollupStream = new RollupStream(stream, buckets1, metrics1); + rollupStream.setStreamContext(streamContext); + tuples = getTuples(rollupStream); + //Check that we've got the extra NULL bucket + assertEquals(4, tuples.size()); + tuple = tuples.get(0); + assertEquals("NULL", tuple.getString("a_s")); + + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals(14, sumi.doubleValue(), 0.01); + assertEquals(10, sumf.doubleValue(), 0.01); + assertEquals(14, mini.doubleValue(), 0.01); + assertEquals(10, minf.doubleValue(), 0.01); + assertEquals(14, maxi.doubleValue(), 0.01); + assertEquals(10, maxf.doubleValue(), 0.01); + assertEquals(14, avgi.doubleValue(), 0.01); + assertEquals(10, avgf.doubleValue(), 0.01); + assertEquals(1, count.doubleValue(), 0.01); + } finally { + solrClientCache.close(); + } } @Test @@ -1583,66 +1715,71 @@ public void testDaemonTopicStream() throws Exception { SolrClientCache cache = new SolrClientCache(); context.setSolrClientCache(cache); - SolrParams sParams = mapParams("q", "a_s:hello0", "rows", "500", "fl", "id"); + try { + SolrParams sParams = mapParams("q", "a_s:hello0", "rows", "500", "fl", "id"); - TopicStream topicStream = new TopicStream(zkHost, - COLLECTIONORALIAS, - COLLECTIONORALIAS, - "50000000", - -1, - 1000000, sParams); + TopicStream topicStream = new TopicStream(zkHost, + COLLECTIONORALIAS, + COLLECTIONORALIAS, + "50000000", + -1, + 1000000, sParams); - DaemonStream daemonStream = new DaemonStream(topicStream, "daemon1", 1000, 500); - daemonStream.setStreamContext(context); + DaemonStream daemonStream = new DaemonStream(topicStream, "daemon1", 1000, 500); + daemonStream.setStreamContext(context); - daemonStream.open(); + daemonStream.open(); - // Wait for the checkpoint - JettySolrRunner jetty = cluster.getJettySolrRunners().get(0); + // Wait for the checkpoint + JettySolrRunner jetty = cluster.getJettySolrRunners().get(0); - SolrParams sParams1 = mapParams("qt", "/get", "ids", "50000000", "fl", "id"); - int count = 0; - while(count == 0) { - SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/" + COLLECTIONORALIAS, sParams1); - List tuples = getTuples(solrStream); - count = tuples.size(); - if(count > 0) { - Tuple t = tuples.get(0); - assertTrue(t.getLong("id") == 50000000); - } else { - System.out.println("###### Waiting for checkpoint #######:" + count); + SolrParams sParams1 = mapParams("qt", "/get", "ids", "50000000", "fl", "id"); + int count = 0; + while (count == 0) { + SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/" + COLLECTIONORALIAS, sParams1); + solrStream.setStreamContext(context); + List tuples = getTuples(solrStream); + count = tuples.size(); + if (count > 0) { + Tuple t = tuples.get(0); + assertTrue(t.getLong("id") == 50000000); + } else { + System.out.println("###### Waiting for checkpoint #######:" + count); + } } - } - new UpdateRequest() - .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1") - .add(id, "2", "a_s", "hello0", "a_i", "2", 
"a_f", "2") - .add(id, "3", "a_s", "hello0", "a_i", "3", "a_f", "3") - .add(id, "4", "a_s", "hello0", "a_i", "4", "a_f", "4") - .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5") - .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + new UpdateRequest() + .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1") + .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2") + .add(id, "3", "a_s", "hello0", "a_i", "3", "a_f", "3") + .add(id, "4", "a_s", "hello0", "a_i", "4", "a_f", "4") + .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5") + .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - for(int i=0; i<5; i++) { - daemonStream.read(); - } + for (int i = 0; i < 5; i++) { + daemonStream.read(); + } - new UpdateRequest() - .add(id, "5", "a_s", "hello0", "a_i", "4", "a_f", "4") - .add(id, "6", "a_s", "hello0", "a_i", "4", "a_f", "4") - .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + new UpdateRequest() + .add(id, "5", "a_s", "hello0", "a_i", "4", "a_f", "4") + .add(id, "6", "a_s", "hello0", "a_i", "4", "a_f", "4") + .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - for(int i=0; i<2; i++) { - daemonStream.read(); - } + for (int i = 0; i < 2; i++) { + daemonStream.read(); + } + + daemonStream.shutdown(); - daemonStream.shutdown(); + Tuple tuple = daemonStream.read(); - Tuple tuple = daemonStream.read(); + assertTrue(tuple.EOF); + daemonStream.close(); + } finally { + cache.close(); + } - assertTrue(tuple.EOF); - daemonStream.close(); - cache.close(); } @@ -1662,99 +1799,107 @@ public void testParallelRollupStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc", "partitionKeys", "a_s"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - - Bucket[] buckets = {new Bucket("a_s")}; - - Metric[] metrics = {new SumMetric("a_i"), - new SumMetric("a_f"), - new MinMetric("a_i"), - new MinMetric("a_f"), - new MaxMetric("a_i"), - new MaxMetric("a_f"), - new MeanMetric("a_i"), - new MeanMetric("a_f"), - new CountMetric()}; - - RollupStream rollupStream = new RollupStream(stream, buckets, metrics); - ParallelStream parallelStream = parallelStream(rollupStream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); - attachStreamFactory(parallelStream); - List tuples = getTuples(parallelStream); - - assertEquals(3, tuples.size()); - - //Test Long and Double Sums - - Tuple tuple = tuples.get(0); - String bucket = tuple.getString("a_s"); - Double sumi = tuple.getDouble("sum(a_i)"); - Double sumf = tuple.getDouble("sum(a_f)"); - Double mini = tuple.getDouble("min(a_i)"); - Double minf = tuple.getDouble("min(a_f)"); - Double maxi = tuple.getDouble("max(a_i)"); - Double maxf = tuple.getDouble("max(a_f)"); - Double avgi = tuple.getDouble("avg(a_i)"); - Double avgf = tuple.getDouble("avg(a_f)"); - Double count = tuple.getDouble("count(*)"); - - assertEquals("hello0", bucket); - assertEquals(17, sumi.doubleValue(), 0.001); - assertEquals(18, sumf.doubleValue(), 0.001); - assertEquals(0, mini.doubleValue(), 0.001); - assertEquals(1, minf.doubleValue(), 0.001); - assertEquals(14, maxi.doubleValue(), 0.001); - assertEquals(10, maxf.doubleValue(), 0.001); - assertEquals(4.25, avgi.doubleValue(), 0.001); - assertEquals(4.5, avgf.doubleValue(), 0.001); - assertEquals(4, count.doubleValue(), 0.001); - - tuple = tuples.get(1); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf 
= tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello3", bucket); - assertEquals(38, sumi.doubleValue(), 0.001); - assertEquals(26, sumf.doubleValue(), 0.001); - assertEquals(3, mini.doubleValue(), 0.001); - assertEquals(3, minf.doubleValue(), 0.001); - assertEquals(13, maxi.doubleValue(), 0.001); - assertEquals(9, maxf.doubleValue(), 0.001); - assertEquals(9.5, avgi.doubleValue(), 0.001); - assertEquals(6.5, avgf.doubleValue(), 0.001); - assertEquals(4, count.doubleValue(), 0.001); - - tuple = tuples.get(2); - bucket = tuple.getString("a_s"); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); - - assertEquals("hello4", bucket); - assertEquals(15, sumi.longValue()); - assertEquals(11, sumf.doubleValue(), 0.001); - assertEquals(4, mini.doubleValue(), 0.001); - assertEquals(4, minf.doubleValue(), 0.001); - assertEquals(11, maxi.doubleValue(), 0.001); - assertEquals(7, maxf.doubleValue(), 0.001); - assertEquals(7.5, avgi.doubleValue(), 0.001); - assertEquals(5.5, avgf.doubleValue(), 0.001); - assertEquals(2, count.doubleValue(), 0.001); - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + SolrParams sParamsA = mapParams("q", "*:*", "fl", "a_s,a_i,a_f", "sort", "a_s asc", "partitionKeys", "a_s"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + + Bucket[] buckets = {new Bucket("a_s")}; + + Metric[] metrics = {new SumMetric("a_i"), + new SumMetric("a_f"), + new MinMetric("a_i"), + new MinMetric("a_f"), + new MaxMetric("a_i"), + new MaxMetric("a_f"), + new MeanMetric("a_i"), + new MeanMetric("a_f"), + new CountMetric()}; + + RollupStream rollupStream = new RollupStream(stream, buckets, metrics); + ParallelStream parallelStream = parallelStream(rollupStream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); + attachStreamFactory(parallelStream); + parallelStream.setStreamContext(streamContext); + List tuples = getTuples(parallelStream); + + assertEquals(3, tuples.size()); + + //Test Long and Double Sums + + Tuple tuple = tuples.get(0); + String bucket = tuple.getString("a_s"); + Double sumi = tuple.getDouble("sum(a_i)"); + Double sumf = tuple.getDouble("sum(a_f)"); + Double mini = tuple.getDouble("min(a_i)"); + Double minf = tuple.getDouble("min(a_f)"); + Double maxi = tuple.getDouble("max(a_i)"); + Double maxf = tuple.getDouble("max(a_f)"); + Double avgi = tuple.getDouble("avg(a_i)"); + Double avgf = tuple.getDouble("avg(a_f)"); + Double count = tuple.getDouble("count(*)"); + + assertEquals("hello0", bucket); + assertEquals(17, sumi.doubleValue(), 0.001); + assertEquals(18, sumf.doubleValue(), 0.001); + assertEquals(0, mini.doubleValue(), 0.001); + assertEquals(1, minf.doubleValue(), 0.001); + assertEquals(14, maxi.doubleValue(), 0.001); + assertEquals(10, maxf.doubleValue(), 0.001); + assertEquals(4.25, avgi.doubleValue(), 0.001); + assertEquals(4.5, avgf.doubleValue(), 0.001); 
+ assertEquals(4, count.doubleValue(), 0.001); + + tuple = tuples.get(1); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello3", bucket); + assertEquals(38, sumi.doubleValue(), 0.001); + assertEquals(26, sumf.doubleValue(), 0.001); + assertEquals(3, mini.doubleValue(), 0.001); + assertEquals(3, minf.doubleValue(), 0.001); + assertEquals(13, maxi.doubleValue(), 0.001); + assertEquals(9, maxf.doubleValue(), 0.001); + assertEquals(9.5, avgi.doubleValue(), 0.001); + assertEquals(6.5, avgf.doubleValue(), 0.001); + assertEquals(4, count.doubleValue(), 0.001); + + tuple = tuples.get(2); + bucket = tuple.getString("a_s"); + sumi = tuple.getDouble("sum(a_i)"); + sumf = tuple.getDouble("sum(a_f)"); + mini = tuple.getDouble("min(a_i)"); + minf = tuple.getDouble("min(a_f)"); + maxi = tuple.getDouble("max(a_i)"); + maxf = tuple.getDouble("max(a_f)"); + avgi = tuple.getDouble("avg(a_i)"); + avgf = tuple.getDouble("avg(a_f)"); + count = tuple.getDouble("count(*)"); + + assertEquals("hello4", bucket); + assertEquals(15, sumi.longValue()); + assertEquals(11, sumf.doubleValue(), 0.001); + assertEquals(4, mini.doubleValue(), 0.001); + assertEquals(4, minf.doubleValue(), 0.001); + assertEquals(11, maxi.doubleValue(), 0.001); + assertEquals(7, maxf.doubleValue(), 0.001); + assertEquals(7.5, avgi.doubleValue(), 0.001); + assertEquals(5.5, avgf.doubleValue(), 0.001); + assertEquals(2, count.doubleValue(), 0.001); + } finally { + solrClientCache.close(); + } } @Test @@ -1773,15 +1918,23 @@ public void testZeroParallelReducerStream() throws Exception { .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParamsA = mapParams("q", "blah", "fl", "id,a_s,a_i,a_f", "sort", "a_s asc,a_f asc", "partitionKeys", "a_s"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - ReducerStream rstream = new ReducerStream(stream, - new FieldEqualitor("a_s"), - new GroupOperation(new FieldComparator("a_s", ComparatorOrder.ASCENDING), 2)); - ParallelStream pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); - attachStreamFactory(pstream); - List tuples = getTuples(pstream); - assert(tuples.size() == 0); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + try { + SolrParams sParamsA = mapParams("q", "blah", "fl", "id,a_s,a_i,a_f", "sort", "a_s asc,a_f asc", "partitionKeys", "a_s"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + ReducerStream rstream = new ReducerStream(stream, + new FieldEqualitor("a_s"), + new GroupOperation(new FieldComparator("a_s", ComparatorOrder.ASCENDING), 2)); + ParallelStream pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.ASCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); + assert (tuples.size() == 0); + } finally { + solrClientCache.close(); + } } @@ -1790,37 +1943,45 @@ public void testTuple() throws Exception { new UpdateRequest() .add(id, "0", "a_s", "hello0", "a_i", 
"0", "a_f", "5.1", "s_multi", "a", "s_multi", "b", "i_multi", - "1", "i_multi", "2", "f_multi", "1.2", "f_multi", "1.3") + "1", "i_multi", "2", "f_multi", "1.2", "f_multi", "1.3") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f,s_multi,i_multi,f_multi", "sort", "a_s asc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - List tuples = getTuples(stream); - Tuple tuple = tuples.get(0); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f,s_multi,i_multi,f_multi", "sort", "a_s asc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); + List tuples = getTuples(stream); + Tuple tuple = tuples.get(0); - String s = tuple.getString("a_s"); - assertEquals("hello0", s); - ; + String s = tuple.getString("a_s"); + assertEquals("hello0", s); - long l = tuple.getLong("a_i"); - assertEquals(0, l); - double d = tuple.getDouble("a_f"); - assertEquals(5.1, d, 0.001); + long l = tuple.getLong("a_i"); + assertEquals(0, l); + double d = tuple.getDouble("a_f"); + assertEquals(5.1, d, 0.001); - List stringList = tuple.getStrings("s_multi"); - assertEquals("a", stringList.get(0)); - assertEquals("b", stringList.get(1)); - List longList = tuple.getLongs("i_multi"); - assertEquals(1, longList.get(0).longValue()); - assertEquals(2, longList.get(1).longValue()); + List stringList = tuple.getStrings("s_multi"); + assertEquals("a", stringList.get(0)); + assertEquals("b", stringList.get(1)); - List doubleList = tuple.getDoubles("f_multi"); - assertEquals(1.2, doubleList.get(0).doubleValue(), 0.001); - assertEquals(1.3, doubleList.get(1).doubleValue(), 0.001); + List longList = tuple.getLongs("i_multi"); + assertEquals(1, longList.get(0).longValue()); + assertEquals(2, longList.get(1).longValue()); + List doubleList = tuple.getDoubles("f_multi"); + assertEquals(1.2, doubleList.get(0).doubleValue(), 0.001); + assertEquals(1.3, doubleList.get(1).doubleValue(), 0.001); + } finally { + solrClientCache.close(); + } } @Test @@ -1834,58 +1995,69 @@ public void testMergeStream() throws Exception { .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //Test ascending - SolrParams sParamsA = mapParams("q", "id:(4 1)", "fl", "id,a_s,a_i", "sort", "a_i asc"); - CloudSolrStream streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); - SolrParams sParamsB = mapParams("q", "id:(0 2 3)", "fl", "id,a_s,a_i", "sort", "a_i asc"); - CloudSolrStream streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); + try { + //Test ascending + SolrParams sParamsA = mapParams("q", "id:(4 1)", "fl", "id,a_s,a_i", "sort", "a_i asc"); + CloudSolrStream streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - MergeStream mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i",ComparatorOrder.ASCENDING)); - List tuples = getTuples(mstream); + SolrParams sParamsB = mapParams("q", "id:(0 2 3)", "fl", "id,a_s,a_i", "sort", "a_i asc"); + CloudSolrStream streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); - 
assertEquals(5, tuples.size()); - assertOrder(tuples, 0,1,2,3,4); + MergeStream mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i", ComparatorOrder.ASCENDING)); + mstream.setStreamContext(streamContext); + List tuples = getTuples(mstream); - //Test descending - sParamsA = mapParams("q", "id:(4 1)", "fl", "id,a_s,a_i", "sort", "a_i desc"); - streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + assertEquals(5, tuples.size()); + assertOrder(tuples, 0, 1, 2, 3, 4); - sParamsB = mapParams("q", "id:(0 2 3)", "fl", "id,a_s,a_i", "sort", "a_i desc"); - streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); + //Test descending + sParamsA = mapParams("q", "id:(4 1)", "fl", "id,a_s,a_i", "sort", "a_i desc"); + streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i",ComparatorOrder.DESCENDING)); - tuples = getTuples(mstream); + sParamsB = mapParams("q", "id:(0 2 3)", "fl", "id,a_s,a_i", "sort", "a_i desc"); + streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); - assertEquals(5, tuples.size()); - assertOrder(tuples, 4,3,2,1,0); + mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); + mstream.setStreamContext(streamContext); + tuples = getTuples(mstream); - //Test compound sort + assertEquals(5, tuples.size()); + assertOrder(tuples, 4, 3, 2, 1, 0); - sParamsA = mapParams("q", "id:(2 4 1)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); - streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + //Test compound sort - sParamsB = mapParams("q", "id:(0 3)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); - streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); + sParamsA = mapParams("q", "id:(2 4 1)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); + streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - mstream = new MergeStream(streamA, streamB, new MultipleFieldComparator(new FieldComparator("a_f",ComparatorOrder.ASCENDING),new FieldComparator("a_i",ComparatorOrder.ASCENDING))); - tuples = getTuples(mstream); + sParamsB = mapParams("q", "id:(0 3)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); + streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); - assertEquals(5, tuples.size()); - assertOrder(tuples, 0,2,1,3,4); + mstream = new MergeStream(streamA, streamB, new MultipleFieldComparator(new FieldComparator("a_f", ComparatorOrder.ASCENDING), new FieldComparator("a_i", ComparatorOrder.ASCENDING))); + mstream.setStreamContext(streamContext); + tuples = getTuples(mstream); - sParamsA = mapParams("q", "id:(2 4 1)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i desc"); - streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + assertEquals(5, tuples.size()); + assertOrder(tuples, 0, 2, 1, 3, 4); - sParamsB = mapParams("q", "id:(0 3)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i desc"); - streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); + sParamsA = mapParams("q", "id:(2 4 1)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i desc"); + streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - mstream = new MergeStream(streamA, streamB, new MultipleFieldComparator(new FieldComparator("a_f",ComparatorOrder.ASCENDING),new FieldComparator("a_i",ComparatorOrder.DESCENDING))); - tuples = getTuples(mstream); + sParamsB = mapParams("q", "id:(0 3)", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i desc"); + streamB = 
new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); - assertEquals(5, tuples.size()); - assertOrder(tuples, 2,0,1,3,4); + mstream = new MergeStream(streamA, streamB, new MultipleFieldComparator(new FieldComparator("a_f", ComparatorOrder.ASCENDING), new FieldComparator("a_i", ComparatorOrder.DESCENDING))); + mstream.setStreamContext(streamContext); + tuples = getTuples(mstream); + assertEquals(5, tuples.size()); + assertOrder(tuples, 2, 0, 1, 3, 4); + } finally { + solrClientCache.close(); + } } @Test @@ -1904,36 +2076,45 @@ public void testParallelMergeStream() throws Exception { .add(id, "9", "a_s", "hello1", "a_i", "100", "a_f", "1") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //Test ascending - SolrParams sParamsA = mapParams("q", "id:(4 1 8 7 9)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); - CloudSolrStream streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); - SolrParams sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); - CloudSolrStream streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); + try { + //Test ascending + SolrParams sParamsA = mapParams("q", "id:(4 1 8 7 9)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); + CloudSolrStream streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - MergeStream mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i",ComparatorOrder.ASCENDING)); - ParallelStream pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.ASCENDING)); - attachStreamFactory(pstream); - List tuples = getTuples(pstream); + SolrParams sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); + CloudSolrStream streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); - assertEquals(9, tuples.size()); - assertOrder(tuples, 0,1,2,3,4,7,6,8,9); + MergeStream mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i", ComparatorOrder.ASCENDING)); + ParallelStream pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.ASCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); - //Test descending - sParamsA = mapParams("q", "id:(4 1 8 9)", "fl", "id,a_s,a_i", "sort", "a_i desc", "partitionKeys", "a_i"); - streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + assertEquals(9, tuples.size()); + assertOrder(tuples, 0, 1, 2, 3, 4, 7, 6, 8, 9); - sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i desc", "partitionKeys", "a_i"); - streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); + //Test descending + sParamsA = mapParams("q", "id:(4 1 8 9)", "fl", "id,a_s,a_i", "sort", "a_i desc", "partitionKeys", "a_i"); + streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i",ComparatorOrder.DESCENDING)); - pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); - attachStreamFactory(pstream); - tuples = getTuples(pstream); + sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i desc", "partitionKeys", "a_i"); + streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); - assertEquals(8, 
tuples.size()); - assertOrder(tuples, 9,8,6,4,3,2,1,0); + mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); + pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.DESCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + tuples = getTuples(pstream); + assertEquals(8, tuples.size()); + assertOrder(tuples, 9, 8, 6, 4, 3, 2, 1, 0); + } finally { + solrClientCache.close(); + } } @Test @@ -1952,22 +2133,30 @@ public void testParallelEOF() throws Exception { .add(id, "9", "a_s", "hello1", "a_i", "100", "a_f", "1") .commit(cluster.getSolrClient(), COLLECTIONORALIAS); - //Test ascending - SolrParams sParamsA = mapParams("q", "id:(4 1 8 7 9)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); - CloudSolrStream streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); - - SolrParams sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); - CloudSolrStream streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); - - MergeStream mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i",ComparatorOrder.ASCENDING)); - ParallelStream pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.ASCENDING)); - attachStreamFactory(pstream); - List tuples = getTuples(pstream); - - assertEquals(9, tuples.size()); - Map eofTuples = pstream.getEofTuples(); - assertEquals(numWorkers, eofTuples.size()); // There should be an EOF Tuple for each worker. - + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); + + try { + //Test ascending + SolrParams sParamsA = mapParams("q", "id:(4 1 8 7 9)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); + CloudSolrStream streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA); + + SolrParams sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i"); + CloudSolrStream streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB); + + MergeStream mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i", ComparatorOrder.ASCENDING)); + ParallelStream pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.ASCENDING)); + attachStreamFactory(pstream); + pstream.setStreamContext(streamContext); + List tuples = getTuples(pstream); + + assertEquals(9, tuples.size()); + Map eofTuples = pstream.getEofTuples(); + assertEquals(numWorkers, eofTuples.size()); // There should be an EOF Tuple for each worker. 
+ } finally { + solrClientCache.close(); + } } @Test @@ -1982,40 +2171,50 @@ public void streamTests() throws Exception { .commit(cluster.getSolrClient(), COLLECTIONORALIAS); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); //Basic CloudSolrStream Test with Descending Sort - SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i desc"); - CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - List tuples = getTuples(stream); - - assertEquals(5,tuples.size()); - assertOrder(tuples, 4, 3, 2, 1, 0); + try { + SolrParams sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i desc"); + CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); + List tuples = getTuples(stream); - //With Ascending Sort - sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i asc"); - stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - tuples = getTuples(stream); + assertEquals(5, tuples.size()); + assertOrder(tuples, 4, 3, 2, 1, 0); - assertEquals(5, tuples.size()); - assertOrder(tuples, 0,1,2,3,4); + //With Ascending Sort + sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i", "sort", "a_i asc"); + stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + assertOrder(tuples, 0, 1, 2, 3, 4); - //Test compound sort - sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i desc"); - stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - assertOrder(tuples, 2,0,1,3,4); + //Test compound sort + sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i desc"); + stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + assertOrder(tuples, 2, 0, 1, 3, 4); - sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); - stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); - tuples = getTuples(stream); - assertEquals(5, tuples.size()); - assertOrder(tuples, 0, 2, 1, 3, 4); + sParams = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc"); + stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); + tuples = getTuples(stream); + assertEquals(5, tuples.size()); + assertOrder(tuples, 0, 2, 1, 3, 4); + } finally { + solrClientCache.close(); + } } @Test @@ -2037,8 +2236,13 @@ private void trySortWithQt(String which) throws Exception { //Basic CloudSolrStream Test bools desc SolrParams sParams = mapParams("q", "*:*", "qt", which, "fl", "id,b_sing", "sort", "b_sing asc,id asc"); + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + try { + stream.setStreamContext(streamContext); List tuples = getTuples(stream); assertEquals(5, tuples.size()); @@ -2047,30 +2251,31 @@ private void trySortWithQt(String which) throws Exception { //Basic CloudSolrStream Test bools desc sParams = mapParams("q", "*:*", "qt", which, "fl", "id,b_sing", 
"sort", "b_sing desc,id desc"); stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); tuples = getTuples(stream); - assertEquals (5,tuples.size()); + assertEquals(5, tuples.size()); assertOrder(tuples, 4, 3, 1, 2, 0); //Basic CloudSolrStream Test dates desc sParams = mapParams("q", "*:*", "qt", which, "fl", "id,dt_sing", "sort", "dt_sing desc,id asc"); stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); tuples = getTuples(stream); - assertEquals (5,tuples.size()); + assertEquals(5, tuples.size()); assertOrder(tuples, 2, 0, 1, 4, 3); //Basic CloudSolrStream Test ates desc sParams = mapParams("q", "*:*", "qt", which, "fl", "id,dt_sing", "sort", "dt_sing asc,id desc"); stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams); + stream.setStreamContext(streamContext); tuples = getTuples(stream); assertEquals (5,tuples.size()); assertOrder(tuples, 3, 4, 1, 0, 2); } finally { - if (stream != null) { - stream.close(); - } + solrClientCache.close(); } } @@ -2099,11 +2304,15 @@ public void testAllValidExportTypes() throws Exception { // We should be getting the exact same thing back with both the export and select handlers, so test private void tryWithQt(String which) throws IOException { + StreamContext streamContext = new StreamContext(); + SolrClientCache solrClientCache = new SolrClientCache(); + streamContext.setSolrClientCache(solrClientCache); SolrParams sParams = StreamingTest.mapParams("q", "*:*", "qt", which, "fl", "id,i_sing,i_multi,l_sing,l_multi,f_sing,f_multi,d_sing,d_multi,dt_sing,dt_multi,s_sing,s_multi,b_sing,b_multi", "sort", "i_sing asc"); try (CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams)) { + stream.setStreamContext(streamContext); Tuple tuple = getTuple(stream); // All I really care about is that all the fields are returned. There's assertEquals("Integers should be returned", 11, tuple.getLong("i_sing").longValue()); @@ -2141,6 +2350,8 @@ private void tryWithQt(String which) throws IOException { assertTrue("Booleans should be returned", tuple.getBool("b_sing")); assertFalse("MV boolean should be returned for b_multi", tuple.getBools("b_multi").get(0)); assertTrue("MV boolean should be returned for b_multi", tuple.getBools("b_multi").get(1)); + } finally { + solrClientCache.close(); } } diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ConversionEvaluatorsTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ConversionEvaluatorsTest.java new file mode 100644 index 000000000000..94124ad36265 --- /dev/null +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ConversionEvaluatorsTest.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.stream.eval; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.collections.map.HashedMap; +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.eval.ConversionEvaluator; +import org.apache.solr.client.solrj.io.eval.RawValueEvaluator; +import org.apache.solr.client.solrj.io.eval.StreamEvaluator; +import org.apache.solr.client.solrj.io.stream.StreamContext; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; +import org.junit.Test; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertTrue; + +/** + * Test ConversionEvaluators + */ +public class ConversionEvaluatorsTest { + + + StreamFactory factory; + Map values; + + public ConversionEvaluatorsTest() { + super(); + + factory = new StreamFactory(); + factory.withFunctionName("convert", ConversionEvaluator.class).withFunctionName("raw", RawValueEvaluator.class); + + values = new HashedMap(); + } + + @Test + public void testInvalidExpression() throws Exception { + + StreamEvaluator evaluator; + + try { + evaluator = factory.constructEvaluator("convert(inches)"); + StreamContext streamContext = new StreamContext(); + evaluator.setStreamContext(streamContext); + assertTrue(false); + } catch (IOException e) { + assertTrue(e.getCause().getCause().getMessage().contains("Invalid expression convert(inches) - expecting 3 value but found 1")); + } + + try { + evaluator = factory.constructEvaluator("convert(inches, yards, 3)"); + StreamContext streamContext = new StreamContext(); + evaluator.setStreamContext(streamContext); + Tuple tuple = new Tuple(new HashMap()); + evaluator.evaluate(tuple); + assertTrue(false); + } catch (IOException e) { + assertTrue(e.getCause().getCause().getMessage().contains("No conversion available from INCHES to YARDS")); + } + } + + @Test + public void testInches() throws Exception { + testFunction("convert(inches, centimeters, 2)", (double)(2*2.54)); + testFunction("convert(inches, meters, 2)", (double)(2*0.0254)); + testFunction("convert(inches, millimeters, 2)", (double)(2*25.40)); + } + + @Test + public void testYards() throws Exception { + testFunction("convert(yards, meters, 2)", (double)(2*.91)); + testFunction("convert(yards, kilometers, 2)", (double)(2*.00091)); + } + + @Test + public void testMiles() throws Exception { + testFunction("convert(miles, kilometers, 2)", (double)(2*1.61)); + } + + @Test + public void testMillimeters() throws Exception { + testFunction("convert(millimeters, inches, 2)", (double)(2*.039)); + } + + @Test + public void testCentimeters() throws Exception { + testFunction("convert(centimeters, inches, 2)", (double)(2*.39)); + } + + @Test + public void testMeters() throws Exception { + testFunction("convert(meters, feet, 2)", (double)(2*3.28)); + } + + @Test + public void testKiloMeters() throws Exception { + testFunction("convert(kilometers, feet, 2)", (double)(2*3280.8)); + testFunction("convert(kilometers, miles, 2)", (double)(2*.62)); + } + + public void testFunction(String expression, Number expected) throws Exception { + StreamEvaluator evaluator = factory.constructEvaluator(expression); + StreamContext streamContext = new StreamContext(); + evaluator.setStreamContext(streamContext); + Object result = evaluator.evaluate(new Tuple(values)); + assertTrue(result instanceof Number); + 
assertEquals(expected, result); + } + + +} diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/TemporalEvaluatorsTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/TemporalEvaluatorsTest.java new file mode 100644 index 000000000000..8205cea33a88 --- /dev/null +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/TemporalEvaluatorsTest.java @@ -0,0 +1,305 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.io.stream.eval; + +import java.io.IOException; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.MonthDay; +import java.time.YearMonth; +import java.time.ZoneOffset; +import java.util.Calendar; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.Locale; +import java.util.Map; +import java.util.TimeZone; + +import org.apache.commons.collections.map.HashedMap; +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorDay; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorDayOfQuarter; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorDayOfYear; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorEpoch; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorHour; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorMinute; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorMonth; +import org.apache.solr.client.solrj.io.eval.StreamEvaluator; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorQuarter; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorSecond; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorWeek; +import org.apache.solr.client.solrj.io.eval.TemporalEvaluatorYear; +import org.apache.solr.client.solrj.io.stream.StreamContext; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpression; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParser; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; +import org.junit.Test; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertNull; +import static junit.framework.Assert.assertTrue; + +/** + * Tests numeric Date/Time stream evaluators + */ +public class TemporalEvaluatorsTest { + + + StreamFactory factory; + Map values; + + public TemporalEvaluatorsTest() { + super(); + + factory = new StreamFactory(); + + factory.withFunctionName(TemporalEvaluatorYear.FUNCTION_NAME, TemporalEvaluatorYear.class); + factory.withFunctionName(TemporalEvaluatorMonth.FUNCTION_NAME, TemporalEvaluatorMonth.class); + 
factory.withFunctionName(TemporalEvaluatorDay.FUNCTION_NAME, TemporalEvaluatorDay.class);
+    factory.withFunctionName(TemporalEvaluatorDayOfYear.FUNCTION_NAME, TemporalEvaluatorDayOfYear.class);
+    factory.withFunctionName(TemporalEvaluatorHour.FUNCTION_NAME, TemporalEvaluatorHour.class);
+    factory.withFunctionName(TemporalEvaluatorMinute.FUNCTION_NAME, TemporalEvaluatorMinute.class);
+    factory.withFunctionName(TemporalEvaluatorSecond.FUNCTION_NAME, TemporalEvaluatorSecond.class);
+    factory.withFunctionName(TemporalEvaluatorEpoch.FUNCTION_NAME, TemporalEvaluatorEpoch.class);
+    factory.withFunctionName(TemporalEvaluatorWeek.FUNCTION_NAME, TemporalEvaluatorWeek.class);
+    factory.withFunctionName(TemporalEvaluatorQuarter.FUNCTION_NAME, TemporalEvaluatorQuarter.class);
+    factory.withFunctionName(TemporalEvaluatorDayOfQuarter.FUNCTION_NAME, TemporalEvaluatorDayOfQuarter.class);
+
+    values = new HashedMap();
+  }
+
+  @Test
+  public void testInvalidExpression() throws Exception {
+
+    StreamEvaluator evaluator;
+
+    try {
+      evaluator = factory.constructEvaluator("week()");
+      StreamContext streamContext = new StreamContext();
+      evaluator.setStreamContext(streamContext);
+      assertTrue(false);
+    } catch (IOException e) {
+      assertTrue(e.getCause().getCause().getMessage().contains("Invalid expression week()"));
+    }
+
+    try {
+      evaluator = factory.constructEvaluator("week(a, b)");
+      StreamContext streamContext = new StreamContext();
+      evaluator.setStreamContext(streamContext);
+      assertTrue(false);
+    } catch (IOException e) {
+      assertTrue(e.getCause().getCause().getMessage().contains("expecting one value but found 2"));
+    }
+
+    try {
+      evaluator = factory.constructEvaluator("Week()");
+      StreamContext streamContext = new StreamContext();
+      evaluator.setStreamContext(streamContext);
+      assertTrue(false);
+    } catch (IOException e) {
+      assertTrue(e.getMessage().contains("Invalid evaluator expression Week() - function 'Week' is unknown"));
+    }
+  }
+
+
+  @Test
+  public void testInvalidValues() throws Exception {
+    StreamEvaluator evaluator = factory.constructEvaluator("year(a)");
+
+
+    try {
+      values.clear();
+      values.put("a", 12);
+      StreamContext streamContext = new StreamContext();
+      evaluator.setStreamContext(streamContext);
+      Object result = evaluator.evaluate(new Tuple(values));
+      assertTrue(false);
+    } catch (IOException e) {
+      assertEquals("Invalid parameter 12 - The parameter must be a string formatted ISO_INSTANT or of type Long,Instant,Date,LocalDateTime or TemporalAccessor.", e.getMessage());
+    }
+
+    try {
+      values.clear();
+      values.put("a", "1995-12-31");
+      StreamContext streamContext = new StreamContext();
+      evaluator.setStreamContext(streamContext);
+      Object result = evaluator.evaluate(new Tuple(values));
+      assertTrue(false);
+    } catch (IOException e) {
+      assertEquals("Invalid parameter 1995-12-31 - The String must be formatted in the ISO_INSTANT date format.", e.getMessage());
+    }
+
+    try {
+      values.clear();
+      values.put("a", "");
+      StreamContext streamContext = new StreamContext();
+      evaluator.setStreamContext(streamContext);
+      Object result = evaluator.evaluate(new Tuple(values));
+      assertTrue(false);
+    } catch (IOException e) {
+      assertEquals("Invalid parameter - The parameter must be a string formatted ISO_INSTANT or of type Long,Instant,Date,LocalDateTime or TemporalAccessor.", e.getMessage());
+    }
+
+    values.clear();
+    values.put("a", null);
+    assertNull(evaluator.evaluate(new Tuple(values)));
+  }
+
+  @Test
+  public void testAllFunctions() throws Exception {
+
+    //year, month, day, dayofyear, hour, minute, quarter, week, second, epoch
+    testFunction("year(a)", "1995-12-31T23:59:59Z", 1995);
+    testFunction("month(a)","1995-12-31T23:59:59Z", 12);
+    testFunction("day(a)", "1995-12-31T23:59:59Z", 31);
+    testFunction("dayOfYear(a)", "1995-12-31T23:59:59Z", 365);
+    testFunction("dayOfQuarter(a)", "1995-12-31T23:59:59Z", 92);
+    testFunction("hour(a)", "1995-12-31T23:59:59Z", 23);
+    testFunction("minute(a)", "1995-12-31T23:59:59Z", 59);
+    testFunction("quarter(a)","1995-12-31T23:59:59Z", 4);
+    testFunction("week(a)", "1995-12-31T23:59:59Z", 52);
+    testFunction("second(a)", "1995-12-31T23:59:58Z", 58);
+    testFunction("epoch(a)", "1995-12-31T23:59:59Z", 820454399000l);
+
+    testFunction("year(a)", "2017-03-17T10:30:45Z", 2017);
+    testFunction("year('a')", "2017-03-17T10:30:45Z", 2017);
+    testFunction("month(a)","2017-03-17T10:30:45Z", 3);
+    testFunction("day(a)", "2017-03-17T10:30:45Z", 17);
+    testFunction("day('a')", "2017-03-17T10:30:45Z", 17);
+    testFunction("dayOfYear(a)", "2017-03-17T10:30:45Z", 76);
+    testFunction("dayOfQuarter(a)", "2017-03-17T10:30:45Z", 76);
+    testFunction("hour(a)", "2017-03-17T10:30:45Z", 10);
+    testFunction("minute(a)", "2017-03-17T10:30:45Z", 30);
+    testFunction("quarter(a)","2017-03-17T10:30:45Z", 1);
+    testFunction("week(a)", "2017-03-17T10:30:45Z", 11);
+    testFunction("second(a)", "2017-03-17T10:30:45Z", 45);
+    testFunction("epoch(a)", "2017-03-17T10:30:45Z", 1489746645000l);
+
+    testFunction("epoch(a)", new Date(1489746645500l).toInstant().toString(), 1489746645500l);
+    testFunction("epoch(a)", new Date(820454399990l).toInstant().toString(), 820454399990l);
+
+  }
+
+  @Test
+  public void testFunctionsOnDate() throws Exception {
+    Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT);
+    calendar.set(2017, 12, 5, 23, 59);
+    Date aDate = calendar.getTime();
+    testFunction("year(a)", aDate, calendar.get(Calendar.YEAR));
+    testFunction("month(a)", aDate, calendar.get(Calendar.MONTH)+1);
+    testFunction("day(a)", aDate, calendar.get(Calendar.DAY_OF_MONTH));
+    testFunction("hour(a)", aDate, calendar.get(Calendar.HOUR_OF_DAY));
+    testFunction("minute(a)", aDate, calendar.get(Calendar.MINUTE));
+    testFunction("epoch(a)", aDate, aDate.getTime());
+  }
+
+  @Test
+  public void testFunctionsOnInstant() throws Exception {
+    Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT);
+    calendar.set(2017, 12, 5, 23, 59);
+    Date aDate = calendar.getTime();
+    Instant instant = aDate.toInstant();
+    testFunction("year(a)", instant, calendar.get(Calendar.YEAR));
+    testFunction("month(a)", instant, calendar.get(Calendar.MONTH)+1);
+    testFunction("day(a)", instant, calendar.get(Calendar.DAY_OF_MONTH));
+    testFunction("hour(a)", instant, calendar.get(Calendar.HOUR_OF_DAY));
+    testFunction("minute(a)", instant, calendar.get(Calendar.MINUTE));
+    testFunction("epoch(a)", instant, aDate.getTime());
+  }
+
+  @Test
+  public void testFunctionsLocalDateTime() throws Exception {
+
+    LocalDateTime localDateTime = LocalDateTime.of(2017,12,5, 23, 59);
+    Date aDate = Date.from(localDateTime.atZone(ZoneOffset.UTC).toInstant());
+    testFunction("year(a)", localDateTime, 2017);
+    testFunction("month(a)", localDateTime, 12);
+    testFunction("day(a)", localDateTime, 5);
+    testFunction("hour(a)", localDateTime, 23);
+    testFunction("minute(a)", localDateTime, 59);
+    testFunction("epoch(a)", localDateTime, aDate.getTime());
+  }
+
+  @Test
+  public void testFunctionsOnLong() throws Exception {
+
+    Long longDate = 1512518340000l;
+
testFunction("year(a)", longDate, 2017); + testFunction("month(a)", longDate, 12); + testFunction("day(a)", longDate, 5); + testFunction("hour(a)", longDate, 23); + testFunction("minute(a)", longDate, 59); + testFunction("second(a)", longDate, 0); + testFunction("epoch(a)", longDate, longDate); + + } + + @Test + public void testLimitedFunctions() throws Exception { + + MonthDay monthDay = MonthDay.of(12,5); + testFunction("month(a)", monthDay, 12); + testFunction("day(a)", monthDay, 5); + + try { + testFunction("year(a)", monthDay, 2017); + assertTrue(false); + } catch (IOException e) { + assertEquals("It is not possible to call 'year' function on java.time.MonthDay", e.getMessage()); + } + + YearMonth yearMonth = YearMonth.of(2018, 4); + testFunction("month(a)", yearMonth, 4); + testFunction("year(a)", yearMonth, 2018); + + try { + testFunction("day(a)", yearMonth, 5); + assertTrue(false); + } catch (IOException e) { + assertEquals("It is not possible to call 'day' function on java.time.YearMonth", e.getMessage()); + } + + } + + + public void testFunction(String expression, Object value, Number expected) throws Exception { + StreamEvaluator evaluator = factory.constructEvaluator(expression); + StreamContext streamContext = new StreamContext(); + evaluator.setStreamContext(streamContext); + values.clear(); + values.put("a", value); + Object result = evaluator.evaluate(new Tuple(values)); + assertTrue(result instanceof Number); + assertEquals(expected, result); + } + + @Test + public void testExplain() throws IOException { + StreamExpression express = StreamExpressionParser.parse("month('myfield')"); + TemporalEvaluatorMonth datePartEvaluator = new TemporalEvaluatorMonth(express,factory); + Explanation explain = datePartEvaluator.toExplanation(factory); + assertEquals("month(myfield)", explain.getExpression()); + + express = StreamExpressionParser.parse("day(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbb)"); + TemporalEvaluatorDay dayPartEvaluator = new TemporalEvaluatorDay(express,factory); + explain = dayPartEvaluator.toExplanation(factory); + assertEquals("day(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbb)", explain.getExpression()); + } +} diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java index 0d4cedd4422f..54ab06d59ea7 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java @@ -33,6 +33,7 @@ import java.lang.invoke.MethodHandles; import java.lang.reflect.Method; import java.net.MalformedURLException; +import java.net.ServerSocket; import java.net.URL; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; @@ -802,6 +803,19 @@ public static void deleteCore() { configString = schemaString = null; } + /** + * Find next available local port. 
+   * @return available port number or -1 if none could be found
+   * @throws Exception on IO errors
+   */
+  protected static int getNextAvailablePort() throws Exception {
+    int port = -1;
+    try (ServerSocket s = new ServerSocket(0)) {
+      port = s.getLocalPort();
+    }
+    return port;
+  }
+
   /** Validates an update XML String is successful */
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index ade1c699af00..48f7670f01a1 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -19,7 +19,6 @@
 import java.io.File;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.net.ServerSocket;
 import java.net.URI;
 import java.net.URL;
 import java.nio.file.Path;
@@ -598,14 +597,6 @@ protected SocketProxy getProxyForReplica(Replica replica) throws Exception {
     return proxy;
   }
 
-  protected int getNextAvailablePort() throws Exception {
-    int port = -1;
-    try (ServerSocket s = new ServerSocket(0)) {
-      port = s.getLocalPort();
-    }
-    return port;
-  }
-
   private File getRelativeSolrHomePath(File solrHome) {
     final Path solrHomePath = solrHome.toPath();
     final Path curDirPath = new File("").getAbsoluteFile().toPath();
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 15895d33b772..06052819f6b1 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -87,6 +87,11 @@ public class MiniSolrCloudCluster {
       " ${distribUpdateConnTimeout:45000}\n" +
       " ${distribUpdateSoTimeout:340000}\n" +
       " \n" +
+      " \n" +
+      " \n" +
+      " solr_${hostPort:8983}\n" +
+      " \n" +
+      " \n" +
       " \n" +
       "\n";
diff --git a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
index 2386681422de..cefd75f8c088 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
@@ -187,16 +187,16 @@ public static NodeConfig buildTestNodeConfig(SolrResourceLoader loader) {
         .build();
     if (System.getProperty("zkHost") == null)
       cloudConfig = null;
-    UpdateShardHandlerConfig updateShardHandlerConfig
-        = new UpdateShardHandlerConfig(UpdateShardHandlerConfig.DEFAULT_MAXUPDATECONNECTIONS,
-          UpdateShardHandlerConfig.DEFAULT_MAXUPDATECONNECTIONSPERHOST,
-          30000, 30000,
-          UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY);
+    UpdateShardHandlerConfig updateShardHandlerConfig = new UpdateShardHandlerConfig(
+        UpdateShardHandlerConfig.DEFAULT_MAXUPDATECONNECTIONS,
+        UpdateShardHandlerConfig.DEFAULT_MAXUPDATECONNECTIONSPERHOST,
+        30000, 30000,
+        UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY, UpdateShardHandlerConfig.DEFAULT_MAXRECOVERYTHREADS);
     // universal default metric reporter
-    Map attributes = new HashMap<>();
+    Map attributes = new HashMap<>();
     attributes.put("name", "default");
     attributes.put("class", SolrJmxReporter.class.getName());
-    PluginInfo defaultPlugin = new PluginInfo("reporter", attributes, null, null);
+    PluginInfo defaultPlugin = new PluginInfo("reporter", attributes);
     return new NodeConfig.NodeConfigBuilder("testNode", loader)
        .setUseSchemaCache(Boolean.getBoolean("shareSchema"))
@@ -222,13 +222,15 @@ public TestCoresLocator(String coreName, String dataDir, String solrConfig, Stri
     @Override
     public List discover(CoreContainer cc) {
-      return ImmutableList.of(new CoreDescriptor(cc, coreName, cc.getCoreRootDirectory().resolve(coreName),
+      return ImmutableList.of(new CoreDescriptor(coreName, cc.getCoreRootDirectory().resolve(coreName),
+          cc.getContainerProperties(), cc.isZooKeeperAware(),
           CoreDescriptor.CORE_DATADIR, dataDir,
           CoreDescriptor.CORE_CONFIG, solrConfig,
           CoreDescriptor.CORE_SCHEMA, schema,
           CoreDescriptor.CORE_COLLECTION, System.getProperty("collection", "collection1"),
           CoreDescriptor.CORE_SHARD, System.getProperty("shard", "shard1")));
     }
+
   }
 
   public CoreContainer getCoreContainer() {
diff --git a/solr/webapp/web/css/angular/plugins.css b/solr/webapp/web/css/angular/plugins.css
index 0310e0e5d542..03dc2eacf405 100644
--- a/solr/webapp/web/css/angular/plugins.css
+++ b/solr/webapp/web/css/angular/plugins.css
@@ -33,6 +33,8 @@ limitations under the License.
 #content #plugins #navigation .PLUGINCHANGES { margin-top: 20px; }
 #content #plugins #navigation .PLUGINCHANGES a { background-image: url( ../../img/ico/eye.png ); }
 #content #plugins #navigation .RELOAD a { background-image: url( ../../img/ico/arrow-circle.png ); }
+#content #plugins #navigation .NOTE { margin-top: 20px; }
+#content #plugins #navigation .NOTE p { color: #c0c0c0; font-style: italic; }
 
 #content #plugins #navigation a
 
@@ -125,14 +127,14 @@
 #content #plugins #frame .entry .stats span
 {
   float: left;
-  width: 11%;
+  width: 9%;
 }
 
 #content #plugins #frame .entry dd,
 #content #plugins #frame .entry .stats ul
 {
   float: right;
-  width: 88%;
+  width: 90%;
 }
 
 #content #plugins #frame .entry .stats ul
 
@@ -144,12 +146,12 @@
 #content #plugins #frame .entry .stats dt
 {
-  width: 27%;
+  width: 40%;
 }
 
 #content #plugins #frame .entry .stats dd
 {
-  width: 72%;
+  width: 59%;
 }
 
 #content #plugins #frame .entry.expanded a.linker {
diff --git a/solr/webapp/web/partials/plugins.html b/solr/webapp/web/partials/plugins.html
index d95fc9b32ddf..bd122a75495d 100644
--- a/solr/webapp/web/partials/plugins.html
+++ b/solr/webapp/web/partials/plugins.html
@@ -55,8 +55,8 @@
  (plugins.html hunk body: the HTML markup was lost in extraction. The existing navigation items "Watch Changes" and "Refresh Values" are unchanged; the patch adds a NOTE item reading "NOTE: Only selected metrics are shown here. Full metrics can be accessed via /admin/metrics handler.")