Posted to commits@solr.apache.org by kr...@apache.org on 2022/03/04 18:08:31 UTC
[solr] branch main updated: SOLR-14920: Spotless formatting for core - non-test only (#705)
This is an automated email from the ASF dual-hosted git repository.
krisden pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git
The following commit(s) were added to refs/heads/main by this push:
new c99af20 SOLR-14920: Spotless formatting for core - non-test only (#705)
c99af20 is described below
commit c99af207c761ec34812ef1cc3054eb2804b7448b
Author: Kevin Risden <ri...@users.noreply.github.com>
AuthorDate: Fri Mar 4 13:08:24 2022 -0500
SOLR-14920: Spotless formatting for core - non-test only (#705)
Co-authored-by: David Smiley <ds...@apache.org>
Co-authored-by: Erick Erickson <er...@apache.org>
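[Note for readers unfamiliar with the tooling: Spotless enforces a uniform Java code style from the Gradle build (in Solr's case google-java-format), and the bulk of the diff below is the resulting mechanical reformatting of non-test sources under solr/core. A minimal sketch of the kind of Gradle configuration involved follows; the plugin version and target pattern are illustrative assumptions, not copied from gradle/validation/spotless.gradle.

    // Illustrative sketch only; version and targets are assumptions,
    // not taken from the actual gradle/validation/spotless.gradle.
    plugins {
      id 'com.diffplug.spotless' version '6.2.2'
    }

    spotless {
      java {
        // Limit formatting to main (non-test) sources, as this commit does.
        target 'src/java/**/*.java'
        googleJavaFormat()
      }
    }

Formatting is then applied with "gradlew spotlessApply" and verified with "gradlew spotlessCheck".]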
---
gradle/validation/spotless.gradle | 8 +-
.../apache/solr/analysis/LowerCaseTokenizer.java | 63 +-
.../solr/analysis/LowerCaseTokenizerFactory.java | 34 +-
.../solr/analysis/ReversedWildcardFilter.java | 56 +-
.../analysis/ReversedWildcardFilterFactory.java | 72 +-
.../org/apache/solr/analysis/SolrAnalyzer.java | 4 +-
.../org/apache/solr/analysis/TokenizerChain.java | 67 +-
.../org/apache/solr/analysis/package-info.java | 14 +-
.../src/java/org/apache/solr/api/AnnotatedApi.java | 87 +-
solr/core/src/java/org/apache/solr/api/Api.java | 32 +-
solr/core/src/java/org/apache/solr/api/ApiBag.java | 125 +-
.../src/java/org/apache/solr/api/ApiSupport.java | 15 +-
.../core/src/java/org/apache/solr/api/Command.java | 6 +-
.../org/apache/solr/api/ConfigurablePlugin.java | 6 +-
.../apache/solr/api/ContainerPluginsRegistry.java | 192 +-
.../src/java/org/apache/solr/api/EndPoint.java | 1 -
.../src/java/org/apache/solr/api/PayloadObj.java | 55 +-
.../src/java/org/apache/solr/api/V2HttpCall.java | 233 +-
.../src/java/org/apache/solr/api/package-info.java | 5 +-
.../client/solrj/embedded/EmbeddedSolrServer.java | 76 +-
.../solr/client/solrj/embedded/JettyConfig.java | 53 +-
.../client/solrj/embedded/JettySolrRunner.java | 363 +--
.../solr/client/solrj/embedded/package-info.java | 10 +-
.../java/org/apache/solr/cloud/ActionThrottle.java | 20 +-
.../apache/solr/cloud/ActiveReplicaWatcher.java | 77 +-
.../org/apache/solr/cloud/CloudDescriptor.java | 71 +-
.../src/java/org/apache/solr/cloud/CloudUtil.java | 172 +-
.../org/apache/solr/cloud/ClusterSingleton.java | 47 +-
.../apache/solr/cloud/ConfigSetApiLockFactory.java | 22 +-
.../java/org/apache/solr/cloud/ConfigSetCmds.java | 90 +-
.../solr/cloud/DistributedApiAsyncTracker.java | 174 +-
.../solr/cloud/DistributedClusterStateUpdater.java | 679 ++--
.../cloud/DistributedCollectionLockFactory.java | 53 +-
.../cloud/DistributedConfigSetLockFactory.java | 22 +-
.../org/apache/solr/cloud/DistributedLock.java | 6 +-
.../java/org/apache/solr/cloud/DistributedMap.java | 38 +-
.../apache/solr/cloud/DistributedMultiLock.java | 13 +-
.../org/apache/solr/cloud/ElectionContext.java | 21 +-
.../apache/solr/cloud/ExclusiveSliceProperty.java | 158 +-
.../java/org/apache/solr/cloud/LeaderElector.java | 182 +-
.../src/java/org/apache/solr/cloud/LockTree.java | 48 +-
.../src/java/org/apache/solr/cloud/Overseer.java | 721 +++--
.../OverseerCollectionConfigSetProcessor.java | 71 +-
.../cloud/OverseerConfigSetMessageHandler.java | 44 +-
.../apache/solr/cloud/OverseerElectionContext.java | 21 +-
.../apache/solr/cloud/OverseerMessageHandler.java | 9 +-
.../apache/solr/cloud/OverseerNodePrioritizer.java | 60 +-
.../apache/solr/cloud/OverseerSolrResponse.java | 9 +-
.../solr/cloud/OverseerSolrResponseSerializer.java | 28 +-
.../apache/solr/cloud/OverseerTaskProcessor.java | 312 +-
.../org/apache/solr/cloud/OverseerTaskQueue.java | 88 +-
.../solr/cloud/RecoveringCoreTermWatcher.java | 16 +-
.../org/apache/solr/cloud/RecoveryStrategy.java | 270 +-
.../solr/cloud/RefreshCollectionMessage.java | 14 +-
.../org/apache/solr/cloud/ReplicateFromLeader.java | 66 +-
.../solr/cloud/ShardLeaderElectionContext.java | 164 +-
.../solr/cloud/ShardLeaderElectionContextBase.java | 158 +-
.../solr/cloud/SizeLimitedDistributedMap.java | 45 +-
.../java/org/apache/solr/cloud/SolrZkServer.java | 127 +-
.../core/src/java/org/apache/solr/cloud/Stats.java | 17 +-
.../java/org/apache/solr/cloud/SyncStrategy.java | 250 +-
.../core/src/java/org/apache/solr/cloud/ZkCLI.java | 233 +-
.../org/apache/solr/cloud/ZkCollectionTerms.java | 12 +-
.../org/apache/solr/cloud/ZkConfigSetService.java | 82 +-
.../java/org/apache/solr/cloud/ZkController.java | 1479 +++++----
.../cloud/ZkDistributedCollectionLockFactory.java | 79 +-
.../cloud/ZkDistributedConfigSetLockFactory.java | 26 +-
.../org/apache/solr/cloud/ZkDistributedLock.java | 94 +-
.../solr/cloud/ZkDistributedLockFactory.java | 9 +-
.../org/apache/solr/cloud/ZkDistributedQueue.java | 164 +-
.../solr/cloud/ZkDistributedQueueFactory.java | 10 +-
.../java/org/apache/solr/cloud/ZkShardTerms.java | 197 +-
.../apache/solr/cloud/ZkSolrResourceLoader.java | 58 +-
.../solr/cloud/api/collections/AddReplicaCmd.java | 269 +-
.../solr/cloud/api/collections/AliasCmd.java | 69 +-
.../apache/solr/cloud/api/collections/Assign.java | 283 +-
.../solr/cloud/api/collections/BackupCmd.java | 359 ++-
.../cloud/api/collections/CategoryRoutedAlias.java | 99 +-
.../solr/cloud/api/collections/CollApiCmds.java | 333 +-
.../api/collections/CollectionApiLockFactory.java | 97 +-
.../api/collections/CollectionCommandContext.java | 53 +-
.../api/collections/CollectionHandlingUtils.java | 379 ++-
.../solr/cloud/api/collections/CreateAliasCmd.java | 114 +-
.../cloud/api/collections/CreateCollectionCmd.java | 453 ++-
.../solr/cloud/api/collections/CreateShardCmd.java | 140 +-
.../cloud/api/collections/CreateSnapshotCmd.java | 106 +-
.../solr/cloud/api/collections/DeleteAliasCmd.java | 12 +-
.../cloud/api/collections/DeleteBackupCmd.java | 715 +++--
.../cloud/api/collections/DeleteCollectionCmd.java | 124 +-
.../solr/cloud/api/collections/DeleteNodeCmd.java | 92 +-
.../cloud/api/collections/DeleteReplicaCmd.java | 252 +-
.../solr/cloud/api/collections/DeleteShardCmd.java | 158 +-
.../cloud/api/collections/DeleteSnapshotCmd.java | 82 +-
.../api/collections/DimensionalRoutedAlias.java | 130 +-
.../DistributedCollectionCommandContext.java | 15 +-
...istributedCollectionConfigSetCommandRunner.java | 377 ++-
.../api/collections/LeaderRecoveryWatcher.java | 19 +-
.../api/collections/MaintainRoutedAliasCmd.java | 174 +-
.../solr/cloud/api/collections/MigrateCmd.java | 360 ++-
.../solr/cloud/api/collections/MoveReplicaCmd.java | 332 +-
.../collections/OcmhCollectionCommandContext.java | 7 +-
.../OverseerCollectionMessageHandler.java | 99 +-
.../cloud/api/collections/OverseerRoleCmd.java | 50 +-
.../cloud/api/collections/OverseerStatusCmd.java | 220 +-
.../api/collections/ReindexCollectionCmd.java | 496 +--
.../solr/cloud/api/collections/RenameCmd.java | 32 +-
.../solr/cloud/api/collections/ReplaceNodeCmd.java | 200 +-
.../solr/cloud/api/collections/RestoreCmd.java | 444 ++-
.../solr/cloud/api/collections/RoutedAlias.java | 265 +-
.../cloud/api/collections/SetAliasPropCmd.java | 28 +-
.../solr/cloud/api/collections/SplitShardCmd.java | 625 ++--
.../cloud/api/collections/TimeRoutedAlias.java | 282 +-
.../solr/cloud/api/collections/package-info.java | 8 +-
.../solr/cloud/overseer/ClusterStateMutator.java | 43 +-
.../solr/cloud/overseer/CollectionMutator.java | 54 +-
.../apache/solr/cloud/overseer/NodeMutator.java | 59 +-
.../apache/solr/cloud/overseer/OverseerAction.java | 5 +-
.../apache/solr/cloud/overseer/ReplicaMutator.java | 284 +-
.../apache/solr/cloud/overseer/SliceMutator.java | 128 +-
.../apache/solr/cloud/overseer/ZkStateWriter.java | 158 +-
.../apache/solr/cloud/overseer/ZkWriteCommand.java | 14 +-
.../apache/solr/cloud/overseer/package-info.java | 4 +-
.../java/org/apache/solr/cloud/package-info.java | 4 +-
.../src/java/org/apache/solr/cluster/Cluster.java | 7 +-
.../src/java/org/apache/solr/cluster/Node.java | 4 +-
.../src/java/org/apache/solr/cluster/Replica.java | 25 +-
.../src/java/org/apache/solr/cluster/Shard.java | 16 +-
.../org/apache/solr/cluster/SolrCollection.java | 60 +-
.../apache/solr/cluster/events/ClusterEvent.java | 10 +-
.../solr/cluster/events/ClusterEventListener.java | 10 +-
.../solr/cluster/events/ClusterEventProducer.java | 25 +-
.../cluster/events/ClusterEventProducerBase.java | 46 +-
.../events/ClusterPropertiesChangedEvent.java | 4 +-
.../solr/cluster/events/CollectionsAddedEvent.java | 5 +-
.../cluster/events/CollectionsRemovedEvent.java | 4 +-
.../apache/solr/cluster/events/NoOpProducer.java | 10 +-
.../apache/solr/cluster/events/NodesDownEvent.java | 4 +-
.../apache/solr/cluster/events/NodesUpEvent.java | 4 +-
.../events/impl/ClusterEventProducerFactory.java | 213 +-
.../impl/CollectionsRepairEventListener.java | 176 +-
.../events/impl/DefaultClusterEventProducer.java | 216 +-
.../impl/DelegatingClusterEventProducer.java | 73 +-
.../solr/cluster/events/impl/package-info.java | 6 +-
.../apache/solr/cluster/events/package-info.java | 8 +-
.../java/org/apache/solr/cluster/package-info.java | 19 +-
.../solr/cluster/placement/AttributeFetcher.java | 35 +-
.../solr/cluster/placement/AttributeValues.java | 14 +-
.../solr/cluster/placement/CollectionMetrics.java | 6 +-
.../cluster/placement/DeleteCollectionRequest.java | 7 +-
.../cluster/placement/DeleteReplicasRequest.java | 7 +-
.../cluster/placement/DeleteShardsRequest.java | 4 +-
.../org/apache/solr/cluster/placement/Metric.java | 13 +-
.../cluster/placement/ModificationRequest.java | 8 +-
.../apache/solr/cluster/placement/NodeMetric.java | 28 +-
.../solr/cluster/placement/PlacementContext.java | 16 +-
.../solr/cluster/placement/PlacementException.java | 11 +-
.../placement/PlacementModificationException.java | 22 +-
.../solr/cluster/placement/PlacementPlan.java | 22 +-
.../cluster/placement/PlacementPlanFactory.java | 43 +-
.../solr/cluster/placement/PlacementPlugin.java | 59 +-
.../cluster/placement/PlacementPluginConfig.java | 10 +-
.../cluster/placement/PlacementPluginFactory.java | 45 +-
.../solr/cluster/placement/PlacementRequest.java | 44 +-
.../solr/cluster/placement/ReplicaMetric.java | 7 +-
.../solr/cluster/placement/ReplicaMetrics.java | 6 +-
.../solr/cluster/placement/ReplicaPlacement.java | 18 +-
.../solr/cluster/placement/ShardMetrics.java | 8 +-
.../placement/impl/AttributeFetcherImpl.java | 153 +-
.../placement/impl/AttributeValuesImpl.java | 21 +-
.../placement/impl/CollectionMetricsBuilder.java | 51 +-
.../impl/DelegatingPlacementPluginFactory.java | 17 +-
.../solr/cluster/placement/impl/MetricImpl.java | 85 +-
.../placement/impl/ModificationRequestImpl.java | 48 +-
.../cluster/placement/impl/NodeMetricImpl.java | 47 +-
.../placement/impl/PlacementPlanFactoryImpl.java | 13 +-
.../cluster/placement/impl/PlacementPlanImpl.java | 1 -
.../impl/PlacementPluginAssignStrategy.java | 40 +-
.../impl/PlacementPluginFactoryLoader.java | 93 +-
.../placement/impl/PlacementRequestImpl.java | 54 +-
.../cluster/placement/impl/ReplicaMetricImpl.java | 16 +-
.../placement/impl/ReplicaPlacementImpl.java | 40 +-
.../impl/SimpleClusterAbstractionsImpl.java | 152 +-
.../placement/impl/SimplePlacementContextImpl.java | 7 +-
.../solr/cluster/placement/impl/package-info.java | 4 +-
.../solr/cluster/placement/package-info.java | 31 +-
.../placement/plugins/AffinityPlacementConfig.java | 95 +-
.../plugins/AffinityPlacementFactory.java | 726 +++--
.../plugins/MinimizeCoresPlacementFactory.java | 90 +-
.../placement/plugins/RandomPlacementFactory.java | 53 +-
.../placement/plugins/SimplePlacementFactory.java | 77 +-
.../cluster/placement/plugins/package-info.java | 5 +-
.../solr/core/AbstractSolrEventListener.java | 30 +-
.../java/org/apache/solr/core/BlobRepository.java | 118 +-
.../solr/core/ByteBuffersDirectoryFactory.java | 20 +-
.../apache/solr/core/CachingDirectoryFactory.java | 60 +-
.../apache/solr/core/CancellableQueryTracker.java | 126 +-
.../src/java/org/apache/solr/core/CloseHook.java | 48 +-
.../src/java/org/apache/solr/core/CloudConfig.java | 74 +-
.../org/apache/solr/core/ClusterSingletons.java | 183 +-
.../java/org/apache/solr/core/CodecFactory.java | 4 +-
.../java/org/apache/solr/core/ConfigOverlay.java | 41 +-
.../src/java/org/apache/solr/core/ConfigSet.java | 28 +-
.../org/apache/solr/core/ConfigSetProperties.java | 24 +-
.../org/apache/solr/core/ConfigSetService.java | 179 +-
.../java/org/apache/solr/core/CoreContainer.java | 1088 ++++---
.../java/org/apache/solr/core/CoreDescriptor.java | 190 +-
.../apache/solr/core/CorePropertiesLocator.java | 110 +-
.../src/java/org/apache/solr/core/CoreSorter.java | 114 +-
.../java/org/apache/solr/core/CoresLocator.java | 28 +-
.../apache/solr/core/DelegatingEventListener.java | 6 +-
.../src/java/org/apache/solr/core/Diagnostics.java | 8 +-
.../org/apache/solr/core/DirectoryFactory.java | 265 +-
.../solr/core/EphemeralDirectoryFactory.java | 26 +-
.../solr/core/FileSystemConfigSetService.java | 28 +-
.../solr/core/IndexDeletionPolicyWrapper.java | 304 +-
.../org/apache/solr/core/IndexReaderFactory.java | 50 +-
.../src/java/org/apache/solr/core/InitParams.java | 43 +-
.../org/apache/solr/core/MMapDirectoryFactory.java | 28 +-
.../java/org/apache/solr/core/MetricsConfig.java | 49 +-
.../apache/solr/core/NIOFSDirectoryFactory.java | 10 +-
.../solr/core/NRTCachingDirectoryFactory.java | 15 +-
.../src/java/org/apache/solr/core/NodeConfig.java | 279 +-
.../src/java/org/apache/solr/core/NodeRoles.java | 39 +-
.../org/apache/solr/core/OverlaidConfigNode.java | 19 +-
.../src/java/org/apache/solr/core/PluginBag.java | 127 +-
.../src/java/org/apache/solr/core/PluginInfo.java | 131 +-
.../org/apache/solr/core/QuerySenderListener.java | 40 +-
.../org/apache/solr/core/RAMDirectoryFactory.java | 19 +-
.../org/apache/solr/core/RateLimiterConfig.java | 13 +-
.../java/org/apache/solr/core/RequestHandlers.java | 108 +-
.../java/org/apache/solr/core/RequestParams.java | 83 +-
.../org/apache/solr/core/SchemaCodecFactory.java | 122 +-
.../apache/solr/core/ShutdownAwareDirectory.java | 6 +-
.../src/java/org/apache/solr/core/SolrConfig.java | 628 ++--
.../src/java/org/apache/solr/core/SolrCore.java | 1588 ++++++----
.../solr/core/SolrCoreInitializationException.java | 14 +-
.../src/java/org/apache/solr/core/SolrCores.java | 244 +-
.../org/apache/solr/core/SolrDeletionPolicy.java | 34 +-
.../org/apache/solr/core/SolrEventListener.java | 46 +-
.../java/org/apache/solr/core/SolrInfoBean.java | 50 +-
.../src/java/org/apache/solr/core/SolrPaths.java | 78 +-
.../org/apache/solr/core/SolrResourceLoader.java | 496 +--
.../java/org/apache/solr/core/SolrXmlConfig.java | 307 +-
.../apache/solr/core/StandardDirectoryFactory.java | 72 +-
.../solr/core/StandardIndexReaderFactory.java | 9 +-
.../org/apache/solr/core/TracerConfigurator.java | 18 +-
.../apache/solr/core/TransientSolrCoreCache.java | 114 +-
.../solr/core/TransientSolrCoreCacheDefault.java | 35 +-
.../solr/core/TransientSolrCoreCacheFactory.java | 46 +-
.../java/org/apache/solr/core/XmlConfigFile.java | 221 +-
.../src/java/org/apache/solr/core/ZkContainer.java | 164 +-
.../solr/core/backup/AggregateBackupStats.java | 30 +-
.../apache/solr/core/backup/BackupFilePaths.java | 278 +-
.../java/org/apache/solr/core/backup/BackupId.java | 76 +-
.../org/apache/solr/core/backup/BackupManager.java | 176 +-
.../apache/solr/core/backup/BackupProperties.java | 284 +-
.../java/org/apache/solr/core/backup/Checksum.java | 40 +-
.../org/apache/solr/core/backup/ShardBackupId.java | 83 +-
.../solr/core/backup/ShardBackupMetadata.java | 223 +-
.../org/apache/solr/core/backup/package-info.java | 37 +-
.../core/backup/repository/BackupRepository.java | 170 +-
.../backup/repository/BackupRepositoryFactory.java | 22 +-
.../repository/LocalFileSystemRepository.java | 70 +-
.../solr/core/backup/repository/package-info.java | 39 +-
.../java/org/apache/solr/core/package-info.java | 9 +-
.../core/snapshots/CollectionSnapshotMetaData.java | 79 +-
.../solr/core/snapshots/SolrSnapshotManager.java | 164 +-
.../snapshots/SolrSnapshotMetaDataManager.java | 104 +-
.../apache/solr/core/snapshots/package-info.java | 37 +-
.../apache/solr/filestore/DistribPackageStore.java | 184 +-
.../org/apache/solr/filestore/PackageStore.java | 59 +-
.../org/apache/solr/filestore/PackageStoreAPI.java | 226 +-
.../org/apache/solr/filestore/package-info.java | 5 +-
.../solr/handler/AnalysisRequestHandlerBase.java | 261 +-
.../org/apache/solr/handler/AnalyzeEvaluator.java | 21 +-
.../java/org/apache/solr/handler/BlobHandler.java | 170 +-
.../java/org/apache/solr/handler/CatStream.java | 60 +-
.../org/apache/solr/handler/ClassifyStream.java | 62 +-
.../java/org/apache/solr/handler/ClusterAPI.java | 264 +-
.../apache/solr/handler/CollectionBackupsAPI.java | 25 +-
.../org/apache/solr/handler/CollectionsAPI.java | 319 +-
.../solr/handler/ContentStreamHandlerBase.java | 25 +-
.../handler/DocumentAnalysisRequestHandler.java | 140 +-
.../apache/solr/handler/DumpRequestHandler.java | 52 +-
.../org/apache/solr/handler/ExportHandler.java | 47 +-
.../solr/handler/FieldAnalysisRequestHandler.java | 94 +-
.../java/org/apache/solr/handler/GraphHandler.java | 89 +-
.../solr/handler/HaversineMetersEvaluator.java | 9 +-
.../solr/handler/IncrementalShardBackup.java | 348 ++-
.../java/org/apache/solr/handler/IndexFetcher.java | 958 +++---
.../apache/solr/handler/MoreLikeThisHandler.java | 479 +--
.../apache/solr/handler/NestedRequestHandler.java | 7 +-
.../solr/handler/NotFoundRequestHandler.java | 13 +-
.../apache/solr/handler/OldBackupDirectory.java | 5 +-
.../apache/solr/handler/PingRequestHandler.java | 267 +-
.../apache/solr/handler/RealTimeGetHandler.java | 15 +-
.../apache/solr/handler/ReplicationHandler.java | 748 +++--
.../apache/solr/handler/RequestHandlerBase.java | 92 +-
.../apache/solr/handler/RequestHandlerUtils.java | 106 +-
.../java/org/apache/solr/handler/RestoreCore.java | 85 +-
.../org/apache/solr/handler/SchemaHandler.java | 232 +-
.../java/org/apache/solr/handler/SnapShooter.java | 193 +-
.../org/apache/solr/handler/SolrConfigHandler.java | 528 ++--
.../solr/handler/SolrDefaultStreamFactory.java | 9 +-
.../solr/handler/StandardRequestHandler.java | 10 +-
.../org/apache/solr/handler/StreamHandler.java | 154 +-
.../apache/solr/handler/UpdateRequestHandler.java | 152 +-
.../solr/handler/UpdateRequestHandlerApi.java | 28 +-
.../solr/handler/admin/AdminHandlersProxy.java | 45 +-
.../apache/solr/handler/admin/BackupCoreOp.java | 49 +-
.../solr/handler/admin/BaseHandlerApiSupport.java | 91 +-
.../apache/solr/handler/admin/ClusterStatus.java | 141 +-
.../org/apache/solr/handler/admin/ColStatus.java | 28 +-
.../solr/handler/admin/CollectionsHandler.java | 2506 ++++++++-------
.../solr/handler/admin/ConfigSetsHandler.java | 218 +-
.../solr/handler/admin/ContainerPluginsApi.java | 144 +-
.../solr/handler/admin/CoreAdminHandler.java | 239 +-
.../solr/handler/admin/CoreAdminOperation.java | 427 +--
.../solr/handler/admin/CreateSnapshotOp.java | 7 +-
.../solr/handler/admin/DeleteSnapshotOp.java | 4 +-
.../solr/handler/admin/HealthCheckHandler.java | 202 +-
.../solr/handler/admin/IndexSizeEstimator.java | 331 +-
.../org/apache/solr/handler/admin/InfoHandler.java | 40 +-
.../org/apache/solr/handler/admin/InvokeOp.java | 1 -
.../apache/solr/handler/admin/LoggingHandler.java | 52 +-
.../solr/handler/admin/LukeRequestHandler.java | 492 +--
.../apache/solr/handler/admin/MergeIndexesOp.java | 23 +-
.../handler/admin/MetricsCollectorHandler.java | 109 +-
.../apache/solr/handler/admin/MetricsHandler.java | 153 +-
.../solr/handler/admin/PluginInfoHandler.java | 40 +-
.../apache/solr/handler/admin/PrepRecoveryOp.java | 222 +-
.../handler/admin/PropertiesRequestHandler.java | 31 +-
.../solr/handler/admin/RebalanceLeaders.java | 328 +-
.../solr/handler/admin/RequestApplyUpdatesOp.java | 13 +-
.../solr/handler/admin/RequestSyncShardOp.java | 17 +-
.../apache/solr/handler/admin/RestoreCoreOp.java | 39 +-
.../solr/handler/admin/SecurityConfHandler.java | 161 +-
.../handler/admin/SecurityConfHandlerLocal.java | 35 +-
.../solr/handler/admin/SecurityConfHandlerZk.java | 50 +-
.../handler/admin/SegmentsInfoRequestHandler.java | 211 +-
.../solr/handler/admin/ShowFileRequestHandler.java | 198 +-
.../apache/solr/handler/admin/SolrEnvironment.java | 23 +-
.../solr/handler/admin/SolrInfoMBeanHandler.java | 200 +-
.../org/apache/solr/handler/admin/SplitOp.java | 217 +-
.../org/apache/solr/handler/admin/StatusOp.java | 20 +-
.../solr/handler/admin/SystemInfoHandler.java | 299 +-
.../solr/handler/admin/ThreadDumpHandler.java | 100 +-
.../solr/handler/admin/ZookeeperInfoHandler.java | 161 +-
.../solr/handler/admin/ZookeeperReadAPI.java | 92 +-
.../solr/handler/admin/ZookeeperStatusHandler.java | 147 +-
.../solr/handler/admin/api/AddReplicaAPI.java | 29 +-
.../handler/admin/api/AddReplicaPropertyAPI.java | 27 +-
.../solr/handler/admin/api/AllCoresStatusAPI.java | 35 +-
.../handler/admin/api/BalanceShardUniqueAPI.java | 30 +-
.../handler/admin/api/CollectionStatusAPI.java | 50 +-
.../solr/handler/admin/api/CreateCoreAPI.java | 75 +-
.../solr/handler/admin/api/CreateShardAPI.java | 29 +-
.../handler/admin/api/DeleteCollectionAPI.java | 30 +-
.../solr/handler/admin/api/DeleteReplicaAPI.java | 65 +-
.../admin/api/DeleteReplicaPropertyAPI.java | 27 +-
.../solr/handler/admin/api/DeleteShardAPI.java | 59 +-
.../solr/handler/admin/api/ForceLeaderAPI.java | 56 +-
.../solr/handler/admin/api/InvokeClassAPI.java | 55 +-
.../solr/handler/admin/api/MergeIndexesAPI.java | 90 +-
.../solr/handler/admin/api/MigrateDocsAPI.java | 27 +-
.../handler/admin/api/ModifyCollectionAPI.java | 29 +-
.../solr/handler/admin/api/MoveReplicaAPI.java | 27 +-
.../solr/handler/admin/api/NodeHealthAPI.java | 32 +-
.../solr/handler/admin/api/NodeLoggingAPI.java | 32 +-
.../solr/handler/admin/api/NodePropertiesAPI.java | 30 +-
.../solr/handler/admin/api/NodeSystemInfoAPI.java | 33 +-
.../solr/handler/admin/api/NodeThreadsAPI.java | 30 +-
.../handler/admin/api/OverseerOperationAPI.java | 54 +-
.../handler/admin/api/PrepareCoreRecoveryAPI.java | 56 +-
.../handler/admin/api/RebalanceLeadersAPI.java | 27 +-
.../handler/admin/api/RejoinLeaderElectionAPI.java | 69 +-
.../handler/admin/api/ReloadCollectionAPI.java | 27 +-
.../solr/handler/admin/api/ReloadCoreAPI.java | 53 +-
.../solr/handler/admin/api/RenameCoreAPI.java | 64 +-
.../admin/api/RequestApplyCoreUpdatesAPI.java | 57 +-
.../handler/admin/api/RequestBufferUpdatesAPI.java | 56 +-
.../admin/api/RequestCoreCommandStatusAPI.java | 50 +-
.../handler/admin/api/RequestCoreRecoveryAPI.java | 56 +-
.../handler/admin/api/RequestSyncShardAPI.java | 56 +-
.../admin/api/SchemaGetDynamicFieldAPI.java | 30 +-
.../solr/handler/admin/api/SchemaGetFieldAPI.java | 29 +-
.../handler/admin/api/SchemaGetFieldTypeAPI.java | 30 +-
.../solr/handler/admin/api/SchemaInfoAPI.java | 29 +-
.../admin/api/SchemaListAllCopyFieldsAPI.java | 28 +-
.../admin/api/SchemaListAllDynamicFieldsAPI.java | 28 +-
.../admin/api/SchemaListAllFieldTypesAPI.java | 28 +-
.../handler/admin/api/SchemaListAllFieldsAPI.java | 29 +-
.../solr/handler/admin/api/SchemaNameAPI.java | 29 +-
.../handler/admin/api/SchemaSimilarityAPI.java | 28 +-
.../solr/handler/admin/api/SchemaUniqueKeyAPI.java | 28 +-
.../solr/handler/admin/api/SchemaVersionAPI.java | 28 +-
.../solr/handler/admin/api/SchemaZkVersionAPI.java | 28 +-
.../admin/api/SetCollectionPropertyAPI.java | 29 +-
.../handler/admin/api/SingleCoreStatusAPI.java | 39 +-
.../solr/handler/admin/api/SplitCoreAPI.java | 109 +-
.../solr/handler/admin/api/SplitShardAPI.java | 29 +-
.../solr/handler/admin/api/SwapCoresAPI.java | 69 +-
.../solr/handler/admin/api/SyncShardAPI.java | 56 +-
.../solr/handler/admin/api/UnloadCoreAPI.java | 68 +-
.../solr/handler/admin/api/package-info.java | 6 +-
.../apache/solr/handler/admin/package-info.java | 6 +-
.../org/apache/solr/handler/api/ApiRegistrar.java | 10 +-
.../org/apache/solr/handler/api/V2ApiUtils.java | 16 +-
.../org/apache/solr/handler/api/package-info.java | 6 +-
.../component/ActiveTasksListComponent.java | 147 +-
.../handler/component/ActiveTasksListHandler.java | 115 +-
.../solr/handler/component/CloudReplicaSource.java | 105 +-
.../solr/handler/component/DebugComponent.java | 162 +-
.../solr/handler/component/ExpandComponent.java | 318 +-
.../solr/handler/component/FacetComponent.java | 659 ++--
.../solr/handler/component/FieldFacetStats.java | 40 +-
.../solr/handler/component/HighlightComponent.java | 85 +-
.../solr/handler/component/HttpShardHandler.java | 210 +-
.../handler/component/HttpShardHandlerFactory.java | 194 +-
.../handler/component/IterativeMergeStrategy.java | 28 +-
.../solr/handler/component/MergeStrategy.java | 53 +-
.../handler/component/MoreLikeThisComponent.java | 220 +-
.../component/PhrasesIdentificationComponent.java | 875 +++---
.../apache/solr/handler/component/PivotFacet.java | 83 +-
.../solr/handler/component/PivotFacetField.java | 292 +-
.../component/PivotFacetFieldValueCollection.java | 243 +-
.../solr/handler/component/PivotFacetHelper.java | 84 +-
.../handler/component/PivotFacetProcessor.java | 390 ++-
.../solr/handler/component/PivotFacetValue.java | 207 +-
.../component/QueryCancellationComponent.java | 128 +-
.../component/QueryCancellationHandler.java | 122 +-
.../solr/handler/component/QueryComponent.java | 1038 ++++---
.../handler/component/QueryElevationComponent.java | 746 +++--
.../handler/component/RangeFacetProcessor.java | 82 +-
.../solr/handler/component/RangeFacetRequest.java | 438 +--
.../handler/component/RealTimeGetComponent.java | 871 +++---
.../solr/handler/component/ReplicaSource.java | 4 +-
.../solr/handler/component/ResponseBuilder.java | 117 +-
.../handler/component/ResponseLogComponent.java | 65 +-
.../solr/handler/component/SearchComponent.java | 43 +-
.../solr/handler/component/SearchHandler.java | 314 +-
.../apache/solr/handler/component/ShardDoc.java | 47 +-
.../component/ShardFieldSortedHitQueue.java | 44 +-
.../solr/handler/component/ShardHandler.java | 6 +
.../handler/component/ShardHandlerFactory.java | 44 +-
.../solr/handler/component/ShardRequest.java | 54 +-
.../solr/handler/component/ShardResponse.java | 68 +-
.../handler/component/SortedDateStatsValues.java | 5 +-
.../component/SortedNumericStatsValues.java | 14 +-
.../handler/component/SpatialHeatmapFacets.java | 106 +-
.../handler/component/SpellCheckComponent.java | 266 +-
.../handler/component/SpellCheckMergeData.java | 9 +-
.../handler/component/StandaloneReplicaSource.java | 8 +-
.../solr/handler/component/StatsComponent.java | 37 +-
.../apache/solr/handler/component/StatsField.java | 419 +--
.../apache/solr/handler/component/StatsInfo.java | 131 +-
.../apache/solr/handler/component/StatsValues.java | 18 +-
.../solr/handler/component/StatsValuesFactory.java | 242 +-
.../solr/handler/component/SuggestComponent.java | 284 +-
.../handler/component/TaskManagementHandler.java | 194 +-
.../handler/component/TermVectorComponent.java | 151 +-
.../solr/handler/component/TermsComponent.java | 182 +-
.../solr/handler/component/package-info.java | 10 +-
.../designer/DefaultSampleDocumentsLoader.java | 75 +-
.../handler/designer/DefaultSchemaSuggester.java | 195 +-
.../solr/handler/designer/ManagedSchemaDiff.java | 93 +-
.../solr/handler/designer/SampleDocuments.java | 30 +-
.../handler/designer/SampleDocumentsLoader.java | 10 +-
.../solr/handler/designer/SchemaDesignerAPI.java | 501 ++-
.../designer/SchemaDesignerConfigSetHelper.java | 569 ++--
.../handler/designer/SchemaDesignerSettings.java | 36 +-
.../designer/SchemaDesignerSettingsDAO.java | 35 +-
.../solr/handler/designer/SchemaSuggester.java | 10 +-
.../apache/solr/handler/designer/package-info.java | 4 +-
.../solr/handler/export/BoolFieldWriter.java | 4 +-
.../solr/handler/export/DateFieldWriter.java | 18 +-
.../org/apache/solr/handler/export/DoubleComp.java | 1 -
.../solr/handler/export/DoubleFieldWriter.java | 19 +-
.../apache/solr/handler/export/DoubleValue.java | 8 +-
.../solr/handler/export/DoubleValueSortDoc.java | 3 +-
.../apache/solr/handler/export/ExportBuffers.java | 187 +-
.../apache/solr/handler/export/ExportWriter.java | 358 ++-
.../solr/handler/export/ExportWriterStream.java | 39 +-
.../apache/solr/handler/export/FieldWriter.java | 7 +-
.../solr/handler/export/FloatFieldWriter.java | 19 +-
.../org/apache/solr/handler/export/FloatValue.java | 10 +-
.../org/apache/solr/handler/export/IntComp.java | 3 +-
.../apache/solr/handler/export/IntFieldWriter.java | 19 +-
.../org/apache/solr/handler/export/IntValue.java | 10 +-
.../org/apache/solr/handler/export/LongComp.java | 2 +-
.../solr/handler/export/LongFieldWriter.java | 17 +-
.../org/apache/solr/handler/export/LongValue.java | 16 +-
.../solr/handler/export/MultiFieldWriter.java | 80 +-
.../solr/handler/export/QuadValueSortDoc.java | 12 +-
.../solr/handler/export/SingleValueSortDoc.java | 4 +-
.../org/apache/solr/handler/export/SortDoc.java | 9 +-
.../org/apache/solr/handler/export/SortQueue.java | 41 +-
.../org/apache/solr/handler/export/SortValue.java | 11 +-
.../solr/handler/export/StringFieldWriter.java | 60 +-
.../apache/solr/handler/export/StringValue.java | 12 +-
.../solr/handler/export/TripleValueSortDoc.java | 1 -
.../apache/solr/handler/export/package-info.java | 6 +-
.../org/apache/solr/handler/loader/CSVLoader.java | 16 +-
.../apache/solr/handler/loader/CSVLoaderBase.java | 295 +-
.../solr/handler/loader/ContentStreamLoader.java | 20 +-
.../apache/solr/handler/loader/JavabinLoader.java | 167 +-
.../org/apache/solr/handler/loader/JsonLoader.java | 283 +-
.../org/apache/solr/handler/loader/XMLLoader.java | 100 +-
.../apache/solr/handler/loader/package-info.java | 9 +-
.../java/org/apache/solr/handler/package-info.java | 8 +-
.../solr/handler/tagger/OffsetCorrector.java | 119 +-
.../solr/handler/tagger/TagClusterReducer.java | 125 +-
.../java/org/apache/solr/handler/tagger/TagLL.java | 84 +-
.../org/apache/solr/handler/tagger/Tagger.java | 133 +-
.../solr/handler/tagger/TaggerRequestHandler.java | 193 +-
.../solr/handler/tagger/TaggingAttribute.java | 28 +-
.../solr/handler/tagger/TaggingAttributeImpl.java | 9 +-
.../solr/handler/tagger/TermPrefixCursor.java | 71 +-
.../solr/handler/tagger/XmlOffsetCorrector.java | 83 +-
.../apache/solr/handler/tagger/package-info.java | 6 +-
.../highlight/BreakIteratorBoundaryScanner.java | 35 +-
.../org/apache/solr/highlight/DefaultEncoder.java | 8 +-
.../solr/highlight/DefaultSolrHighlighter.java | 529 ++--
.../org/apache/solr/highlight/GapFragmenter.java | 48 +-
.../solr/highlight/HighlightingPluginBase.java | 16 +-
.../org/apache/solr/highlight/HtmlEncoder.java | 5 +-
.../org/apache/solr/highlight/HtmlFormatter.java | 12 +-
.../solr/highlight/LuceneRegexFragmenter.java | 112 +-
.../org/apache/solr/highlight/RegexFragmenter.java | 61 +-
.../solr/highlight/ScoreOrderFragmentsBuilder.java | 9 +-
.../solr/highlight/SimpleBoundaryScanner.java | 5 +-
.../solr/highlight/SimpleFragListBuilder.java | 3 +-
.../solr/highlight/SimpleFragmentsBuilder.java | 8 +-
.../solr/highlight/SingleFragListBuilder.java | 3 +-
.../apache/solr/highlight/SolrBoundaryScanner.java | 8 +-
.../org/apache/solr/highlight/SolrEncoder.java | 2 +-
.../org/apache/solr/highlight/SolrFormatter.java | 5 +-
.../apache/solr/highlight/SolrFragListBuilder.java | 4 +-
.../org/apache/solr/highlight/SolrFragmenter.java | 5 +-
.../solr/highlight/SolrFragmentsBuilder.java | 61 +-
.../org/apache/solr/highlight/SolrHighlighter.java | 64 +-
.../solr/highlight/UnifiedSolrHighlighter.java | 184 +-
.../solr/highlight/WeightedFragListBuilder.java | 7 +-
.../org/apache/solr/highlight/package-info.java | 10 +-
.../solr/index/DefaultMergePolicyFactory.java | 11 +-
.../solr/index/LogByteSizeMergePolicyFactory.java | 8 +-
.../solr/index/LogDocMergePolicyFactory.java | 8 +-
.../org/apache/solr/index/MergePolicyFactory.java | 8 +-
.../apache/solr/index/MergePolicyFactoryArgs.java | 8 +-
.../apache/solr/index/NoMergePolicyFactory.java | 3 +-
.../solr/index/SimpleMergePolicyFactory.java | 17 +-
.../solr/index/SlowCompositeReaderWrapper.java | 68 +-
.../org/apache/solr/index/SortingMergePolicy.java | 3 +-
.../solr/index/SortingMergePolicyFactory.java | 15 +-
.../solr/index/TieredMergePolicyFactory.java | 8 +-
.../solr/index/UpgradeIndexMergePolicyFactory.java | 8 +-
.../solr/index/WrapperMergePolicyFactory.java | 54 +-
.../java/org/apache/solr/index/package-info.java | 7 +-
.../org/apache/solr/internal/csv/CSVParser.java | 344 +--
.../org/apache/solr/internal/csv/CSVPrinter.java | 100 +-
.../org/apache/solr/internal/csv/CSVStrategy.java | 135 +-
.../org/apache/solr/internal/csv/CSVUtils.java | 108 +-
.../org/apache/solr/internal/csv/CharBuffer.java | 331 +-
.../solr/internal/csv/ExtendedBufferedReader.java | 201 +-
.../org/apache/solr/internal/csv/package-info.java | 8 +-
.../java/org/apache/solr/legacy/BBoxStrategy.java | 262 +-
.../org/apache/solr/legacy/BBoxValueSource.java | 31 +-
.../org/apache/solr/legacy/LegacyDoubleField.java | 161 +-
.../java/org/apache/solr/legacy/LegacyField.java | 45 +-
.../org/apache/solr/legacy/LegacyFieldType.java | 61 +-
.../org/apache/solr/legacy/LegacyFloatField.java | 159 +-
.../org/apache/solr/legacy/LegacyIntField.java | 160 +-
.../org/apache/solr/legacy/LegacyLongField.java | 187 +-
.../solr/legacy/LegacyNumericRangeQuery.java | 737 +++--
.../solr/legacy/LegacyNumericTokenStream.java | 217 +-
.../org/apache/solr/legacy/LegacyNumericType.java | 11 +-
.../org/apache/solr/legacy/LegacyNumericUtils.java | 391 ++-
.../java/org/apache/solr/legacy/package-info.java | 6 +-
.../java/org/apache/solr/logging/CircularList.java | 79 +-
.../org/apache/solr/logging/DeprecationLog.java | 12 +-
.../org/apache/solr/logging/ListenerConfig.java | 3 +-
.../java/org/apache/solr/logging/LogWatcher.java | 77 +-
.../org/apache/solr/logging/LogWatcherConfig.java | 10 +-
.../java/org/apache/solr/logging/LoggerInfo.java | 21 +-
.../org/apache/solr/logging/MDCLoggingContext.java | 54 +-
.../java/org/apache/solr/logging/MDCSnapshot.java | 21 +-
.../java/org/apache/solr/logging/jul/JulInfo.java | 27 +-
.../org/apache/solr/logging/jul/JulWatcher.java | 77 +-
.../org/apache/solr/logging/jul/RecordHandler.java | 16 +-
.../org/apache/solr/logging/jul/package-info.java | 7 +-
.../apache/solr/logging/log4j2/Log4j2Watcher.java | 68 +-
.../apache/solr/logging/log4j2/package-info.java | 5 +-
.../java/org/apache/solr/logging/package-info.java | 8 +-
.../org/apache/solr/metrics/AggregateMetric.java | 50 +-
.../solr/metrics/AltBufferPoolMetricSet.java | 17 +-
.../solr/metrics/FilteringSolrMetricReporter.java | 12 +-
.../org/apache/solr/metrics/MetricSuppliers.java | 128 +-
.../java/org/apache/solr/metrics/MetricsMap.java | 118 +-
.../solr/metrics/OperatingSystemMetricSet.java | 23 +-
.../solr/metrics/SolrCoreContainerReporter.java | 13 +-
.../apache/solr/metrics/SolrCoreMetricManager.java | 97 +-
.../org/apache/solr/metrics/SolrCoreReporter.java | 13 +-
.../org/apache/solr/metrics/SolrMetricInfo.java | 29 +-
.../org/apache/solr/metrics/SolrMetricManager.java | 599 ++--
.../apache/solr/metrics/SolrMetricProducer.java | 43 +-
.../apache/solr/metrics/SolrMetricReporter.java | 34 +-
.../apache/solr/metrics/SolrMetricsContext.java | 99 +-
.../java/org/apache/solr/metrics/package-info.java | 6 +-
.../metrics/reporters/ReporterClientCache.java | 18 +-
.../metrics/reporters/SolrGraphiteReporter.java | 35 +-
.../solr/metrics/reporters/SolrJmxReporter.java | 115 +-
.../solr/metrics/reporters/SolrSlf4jReporter.java | 65 +-
.../metrics/reporters/jmx/JmxMetricsReporter.java | 161 +-
.../reporters/jmx/JmxObjectNameFactory.java | 31 +-
.../solr/metrics/reporters/jmx/package-info.java | 3 +-
.../solr/metrics/reporters/package-info.java | 4 +-
.../solr/metrics/reporters/solr/SolrReporter.java | 309 +-
.../solr/metrics/reporters/solr/package-info.java | 4 +-
.../src/java/org/apache/solr/package-info.java | 7 +-
.../packagemanager/DefaultPackageRepository.java | 25 +-
.../apache/solr/packagemanager/PackageManager.java | 738 +++--
.../solr/packagemanager/PackageRepository.java | 16 +-
.../apache/solr/packagemanager/PackageUtils.java | 166 +-
.../solr/packagemanager/RepositoryManager.java | 183 +-
.../apache/solr/packagemanager/SolrPackage.java | 22 +-
.../solr/packagemanager/SolrPackageInstance.java | 41 +-
.../apache/solr/packagemanager/package-info.java | 4 +-
.../apache/solr/parser/SolrQueryParserBase.java | 647 ++--
.../java/org/apache/solr/parser/package-info.java | 9 +-
.../src/java/org/apache/solr/pkg/PackageAPI.java | 249 +-
.../java/org/apache/solr/pkg/PackageListeners.java | 25 +-
.../solr/pkg/PackageListeningClassLoader.java | 283 +-
.../java/org/apache/solr/pkg/PackageLoader.java | 74 +-
.../org/apache/solr/pkg/PackagePluginHolder.java | 95 +-
.../src/java/org/apache/solr/pkg/package-info.java | 6 +-
.../java/org/apache/solr/query/FilterQuery.java | 14 +-
.../java/org/apache/solr/query/SolrRangeQuery.java | 127 +-
.../java/org/apache/solr/query/package-info.java | 6 +-
.../org/apache/solr/request/DocValuesFacets.java | 281 +-
.../org/apache/solr/request/DocValuesStats.java | 103 +-
.../org/apache/solr/request/IntervalFacets.java | 512 ++--
.../apache/solr/request/LocalSolrQueryRequest.java | 64 +-
.../org/apache/solr/request/NumericFacets.java | 220 +-
.../request/PerSegmentSingleValuedFaceting.java | 196 +-
.../apache/solr/request/RegexBytesRefFilter.java | 7 +-
.../java/org/apache/solr/request/SimpleFacets.java | 870 +++---
.../org/apache/solr/request/SolrQueryRequest.java | 78 +-
.../apache/solr/request/SolrQueryRequestBase.java | 77 +-
.../apache/solr/request/SolrRequestHandler.java | 45 +-
.../org/apache/solr/request/SolrRequestInfo.java | 35 +-
.../solr/request/SubstringBytesRefFilter.java | 12 +-
.../org/apache/solr/request/json/JSONUtil.java | 18 +-
.../solr/request/json/JsonQueryConverter.java | 61 +-
.../org/apache/solr/request/json/ObjectUtil.java | 58 +-
.../org/apache/solr/request/json/RequestUtil.java | 140 +-
.../org/apache/solr/request/json/package-info.java | 6 +-
.../apache/solr/request/macro/MacroExpander.java | 57 +-
.../apache/solr/request/macro/package-info.java | 6 +-
.../java/org/apache/solr/request/package-info.java | 8 +-
.../apache/solr/response/BasicResultContext.java | 9 +-
.../solr/response/BinaryQueryResponseWriter.java | 23 +-
.../apache/solr/response/BinaryResponseWriter.java | 87 +-
.../apache/solr/response/CSVResponseWriter.java | 169 +-
.../org/apache/solr/response/DocsStreamer.java | 73 +-
.../solr/response/GeoJSONResponseWriter.java | 231 +-
.../solr/response/GraphMLResponseWriter.java | 42 +-
.../apache/solr/response/JSONResponseWriter.java | 372 +--
.../java/org/apache/solr/response/JSONWriter.java | 88 +-
.../apache/solr/response/PHPResponseWriter.java | 16 +-
.../solr/response/PHPSerializedResponseWriter.java | 95 +-
.../apache/solr/response/PythonResponseWriter.java | 43 +-
.../apache/solr/response/QueryResponseWriter.java | 66 +-
.../solr/response/QueryResponseWriterUtil.java | 69 +-
.../apache/solr/response/RawResponseWriter.java | 76 +-
.../solr/response/RawShimTextResponseWriter.java | 22 +-
.../org/apache/solr/response/ResultContext.java | 22 +-
.../apache/solr/response/RubyResponseWriter.java | 20 +-
.../solr/response/SchemaXmlResponseWriter.java | 7 +-
.../org/apache/solr/response/SchemaXmlWriter.java | 211 +-
.../apache/solr/response/SmileResponseWriter.java | 23 +-
.../apache/solr/response/SolrQueryResponse.java | 244 +-
.../solr/response/TabularResponseWriter.java | 62 +-
.../apache/solr/response/TextResponseWriter.java | 130 +-
.../apache/solr/response/XMLResponseWriter.java | 7 +-
.../java/org/apache/solr/response/XMLWriter.java | 175 +-
.../org/apache/solr/response/package-info.java | 9 +-
.../transform/BaseEditorialTransformer.java | 16 +-
.../response/transform/ChildDocTransformer.java | 112 +-
.../transform/ChildDocTransformerFactory.java | 98 +-
.../response/transform/DocIdAugmenterFactory.java | 19 +-
.../solr/response/transform/DocTransformer.java | 102 +-
.../solr/response/transform/DocTransformers.java | 61 +-
.../response/transform/ElevatedMarkerFactory.java | 8 +-
.../response/transform/ExcludedMarkerFactory.java | 14 +-
.../transform/ExplainAugmenterFactory.java | 45 +-
.../response/transform/GeoTransformerFactory.java | 150 +-
.../transform/RawValueTransformerFactory.java | 62 +-
.../response/transform/RenameFieldTransformer.java | 17 +-
.../solr/response/transform/ScoreAugmenter.java | 11 +-
.../response/transform/ShardAugmenterFactory.java | 15 +-
.../transform/SubQueryAugmenterFactory.java | 224 +-
.../response/transform/TransformerFactory.java | 117 +-
.../response/transform/ValueAugmenterFactory.java | 11 +-
.../response/transform/ValueSourceAugmenter.java | 36 +-
.../solr/response/transform/package-info.java | 9 +-
.../org/apache/solr/rest/BaseSolrResource.java | 67 +-
.../java/org/apache/solr/rest/ManagedResource.java | 291 +-
.../apache/solr/rest/ManagedResourceObserver.java | 20 +-
.../apache/solr/rest/ManagedResourceStorage.java | 354 +--
.../src/java/org/apache/solr/rest/RestManager.java | 403 ++-
.../java/org/apache/solr/rest/package-info.java | 8 +-
.../solr/rest/schema/FieldTypeXmlAdapter.java | 164 +-
.../analysis/BaseManagedTokenFilterFactory.java | 71 +-
.../schema/analysis/ManagedStopFilterFactory.java | 59 +-
.../analysis/ManagedSynonymFilterFactory.java | 276 +-
.../analysis/ManagedSynonymGraphFilterFactory.java | 185 +-
.../schema/analysis/ManagedWordSetResource.java | 105 +-
.../solr/rest/schema/analysis/package-info.java | 10 +-
.../org/apache/solr/rest/schema/package-info.java | 7 +-
.../org/apache/solr/schema/AbstractEnumField.java | 187 +-
.../solr/schema/AbstractSpatialFieldType.java | 159 +-
.../schema/AbstractSpatialPrefixTreeFieldType.java | 116 +-
.../solr/schema/AbstractSubTypeFieldType.java | 75 +-
.../src/java/org/apache/solr/schema/BBoxField.java | 88 +-
.../java/org/apache/solr/schema/BinaryField.java | 24 +-
.../src/java/org/apache/solr/schema/BoolField.java | 90 +-
.../solr/schema/ClassicIndexSchemaFactory.java | 2 +-
.../org/apache/solr/schema/CollationField.java | 149 +-
.../apache/solr/schema/CoordinateFieldType.java | 28 +-
.../src/java/org/apache/solr/schema/CopyField.java | 22 +-
.../java/org/apache/solr/schema/CurrencyField.java | 27 +-
.../org/apache/solr/schema/CurrencyFieldType.java | 340 +-
.../java/org/apache/solr/schema/CurrencyValue.java | 115 +-
.../org/apache/solr/schema/DatePointField.java | 113 +-
.../org/apache/solr/schema/DateRangeField.java | 82 +-
.../org/apache/solr/schema/DateValueFieldType.java | 7 +-
.../org/apache/solr/schema/DenseVectorField.java | 483 +--
.../org/apache/solr/schema/DoublePointField.java | 30 +-
.../apache/solr/schema/DoubleValueFieldType.java | 7 +-
.../src/java/org/apache/solr/schema/EnumField.java | 63 +-
.../java/org/apache/solr/schema/EnumFieldType.java | 76 +-
.../apache/solr/schema/ExchangeRateProvider.java | 39 +-
.../org/apache/solr/schema/ExternalFileField.java | 64 +-
.../solr/schema/ExternalFileFieldReloader.java | 23 +-
.../org/apache/solr/schema/FieldProperties.java | 100 +-
.../src/java/org/apache/solr/schema/FieldType.java | 926 +++---
.../apache/solr/schema/FieldTypePluginLoader.java | 443 +--
.../solr/schema/FileExchangeRateProvider.java | 94 +-
.../org/apache/solr/schema/FloatPointField.java | 30 +-
.../apache/solr/schema/FloatValueFieldType.java | 7 +-
.../solr/schema/HasImplicitIndexAnalyzer.java | 6 +-
.../java/org/apache/solr/schema/IndexSchema.java | 1219 ++++----
.../org/apache/solr/schema/IndexSchemaFactory.java | 82 +-
.../java/org/apache/solr/schema/IntPointField.java | 27 +-
.../org/apache/solr/schema/IntValueFieldType.java | 8 +-
.../apache/solr/schema/JsonPreAnalyzedParser.java | 68 +-
.../solr/schema/LatLonPointSpatialField.java | 123 +-
.../org/apache/solr/schema/LongPointField.java | 27 +-
.../org/apache/solr/schema/LongValueFieldType.java | 7 +-
.../org/apache/solr/schema/ManagedIndexSchema.java | 632 ++--
.../solr/schema/ManagedIndexSchemaFactory.java | 291 +-
.../java/org/apache/solr/schema/NestPathField.java | 26 +-
.../java/org/apache/solr/schema/NumberType.java | 17 +-
.../org/apache/solr/schema/NumericFieldType.java | 176 +-
.../apache/solr/schema/NumericValueFieldType.java | 7 +-
.../solr/schema/OpenExchangeRatesOrgProvider.java | 145 +-
.../java/org/apache/solr/schema/PointField.java | 124 +-
.../src/java/org/apache/solr/schema/PointType.java | 155 +-
.../org/apache/solr/schema/PreAnalyzedField.java | 136 +-
.../org/apache/solr/schema/PrimitiveFieldType.java | 17 +-
.../org/apache/solr/schema/RandomSortField.java | 175 +-
.../src/java/org/apache/solr/schema/RankField.java | 82 +-
.../solr/schema/RptWithGeometrySpatialField.java | 58 +-
.../java/org/apache/solr/schema/SchemaAware.java | 18 +-
.../java/org/apache/solr/schema/SchemaField.java | 353 ++-
.../java/org/apache/solr/schema/SchemaManager.java | 259 +-
.../org/apache/solr/schema/SimilarityFactory.java | 57 +-
.../solr/schema/SimplePreAnalyzedParser.java | 357 ++-
.../org/apache/solr/schema/SortableTextField.java | 183 +-
.../org/apache/solr/schema/SpatialQueryable.java | 10 +-
.../SpatialRecursivePrefixTreeFieldType.java | 15 +-
.../src/java/org/apache/solr/schema/StrField.java | 42 +-
.../org/apache/solr/schema/StrFieldSource.java | 14 +-
.../src/java/org/apache/solr/schema/TextField.java | 103 +-
.../java/org/apache/solr/schema/TrieDateField.java | 93 +-
.../org/apache/solr/schema/TrieDoubleField.java | 46 +-
.../src/java/org/apache/solr/schema/TrieField.java | 361 ++-
.../org/apache/solr/schema/TrieFloatField.java | 45 +-
.../java/org/apache/solr/schema/TrieIntField.java | 31 +-
.../java/org/apache/solr/schema/TrieLongField.java | 32 +-
.../src/java/org/apache/solr/schema/UUIDField.java | 52 +-
.../apache/solr/schema/ZkIndexSchemaReader.java | 106 +-
.../java/org/apache/solr/schema/package-info.java | 8 +-
.../apache/solr/search/AbstractReRankQuery.java | 26 +-
.../org/apache/solr/search/AnalyticsQuery.java | 33 +-
.../src/java/org/apache/solr/search/BitDocSet.java | 55 +-
.../solr/search/BitsFilteredPostingsEnum.java | 1 -
.../org/apache/solr/search/BoolQParserPlugin.java | 25 +-
.../org/apache/solr/search/BoostQParserPlugin.java | 30 +-
.../java/org/apache/solr/search/CacheConfig.java | 84 +-
.../org/apache/solr/search/CacheRegenerator.java | 27 +-
.../java/org/apache/solr/search/CaffeineCache.java | 235 +-
.../apache/solr/search/CancellableCollector.java | 3 +-
.../solr/search/CollapsingQParserPlugin.java | 2106 +++++++------
.../solr/search/ComplexPhraseQParserPlugin.java | 106 +-
.../java/org/apache/solr/search/CursorMark.java | 196 +-
.../apache/solr/search/DelegatingCollector.java | 11 +-
.../java/org/apache/solr/search/DisMaxQParser.java | 117 +-
.../apache/solr/search/DisMaxQParserPlugin.java | 139 +-
.../java/org/apache/solr/search/DocIterator.java | 20 +-
.../src/java/org/apache/solr/search/DocList.java | 49 +-
.../java/org/apache/solr/search/DocListAndSet.java | 15 +-
.../src/java/org/apache/solr/search/DocSet.java | 72 +-
.../java/org/apache/solr/search/DocSetBuilder.java | 24 +-
.../org/apache/solr/search/DocSetCollector.java | 31 +-
.../org/apache/solr/search/DocSetProducer.java | 4 +-
.../java/org/apache/solr/search/DocSetQuery.java | 106 +-
.../java/org/apache/solr/search/DocSetUtil.java | 77 +-
.../src/java/org/apache/solr/search/DocSlice.java | 91 +-
.../solr/search/EarlyTerminatingCollector.java | 29 +-
.../search/EarlyTerminatingCollectorException.java | 37 +-
.../search/EarlyTerminatingSortingCollector.java | 61 +-
.../apache/solr/search/ExportQParserPlugin.java | 69 +-
.../apache/solr/search/ExtendedDismaxQParser.java | 720 ++---
.../solr/search/ExtendedDismaxQParserPlugin.java | 7 +-
.../java/org/apache/solr/search/ExtendedQuery.java | 7 +-
.../java/org/apache/solr/search/FieldParams.java | 18 +-
.../org/apache/solr/search/FieldQParserPlugin.java | 16 +-
.../org/apache/solr/search/FilteredDocIdSet.java | 48 +-
.../solr/search/FloatPayloadValueSource.java | 158 +-
.../org/apache/solr/search/FunctionQParser.java | 172 +-
.../apache/solr/search/FunctionQParserPlugin.java | 9 +-
.../solr/search/FunctionRangeQParserPlugin.java | 33 +-
.../org/apache/solr/search/FunctionRangeQuery.java | 22 +-
.../solr/search/GraphTermsQParserPlugin.java | 212 +-
.../src/java/org/apache/solr/search/Grouping.java | 310 +-
.../org/apache/solr/search/HashQParserPlugin.java | 113 +-
.../solr/search/IGainTermsQParserPlugin.java | 43 +-
.../org/apache/solr/search/JoinQParserPlugin.java | 80 +-
.../src/java/org/apache/solr/search/JoinQuery.java | 138 +-
.../search/LegacyNumericRangeQueryBuilder.java | 80 +-
.../java/org/apache/solr/search/LuceneQParser.java | 16 +-
.../apache/solr/search/LuceneQParserPlugin.java | 22 +-
.../org/apache/solr/search/MatchCostQuery.java | 7 +-
.../org/apache/solr/search/MaxScoreCollector.java | 1 -
.../org/apache/solr/search/MaxScoreQParser.java | 30 +-
.../apache/solr/search/MaxScoreQParserPlugin.java | 12 +-
.../org/apache/solr/search/MinHashQParser.java | 45 +-
.../apache/solr/search/MinHashQParserPlugin.java | 10 +-
.../apache/solr/search/NestedQParserPlugin.java | 22 +-
.../org/apache/solr/search/NoOpRegenerator.java | 20 +-
.../solr/search/NumericHidingLeafReader.java | 153 +-
.../solr/search/PayloadCheckQParserPlugin.java | 25 +-
.../solr/search/PayloadScoreQParserPlugin.java | 29 +-
.../java/org/apache/solr/search/PointMerger.java | 61 +-
.../java/org/apache/solr/search/PostFilter.java | 38 +-
.../apache/solr/search/PrefixQParserPlugin.java | 14 +-
.../src/java/org/apache/solr/search/QParser.java | 149 +-
.../java/org/apache/solr/search/QParserPlugin.java | 17 +-
.../java/org/apache/solr/search/QueryCommand.java | 88 +-
.../java/org/apache/solr/search/QueryContext.java | 13 +-
.../java/org/apache/solr/search/QueryParsing.java | 113 +-
.../java/org/apache/solr/search/QueryResult.java | 29 +-
.../org/apache/solr/search/QueryResultKey.java | 74 +-
.../java/org/apache/solr/search/QueryUtils.java | 74 +-
.../org/apache/solr/search/RankQParserPlugin.java | 55 +-
.../src/java/org/apache/solr/search/RankQuery.java | 16 +-
.../org/apache/solr/search/RawQParserPlugin.java | 21 +-
.../org/apache/solr/search/ReRankCollector.java | 96 +-
.../apache/solr/search/ReRankQParserPlugin.java | 52 +-
.../java/org/apache/solr/search/ReRankWeight.java | 16 +-
.../java/org/apache/solr/search/ReturnFields.java | 44 +-
.../solr/search/SignificantTermsQParserPlugin.java | 63 +-
.../apache/solr/search/SimpleQParserPlugin.java | 107 +-
.../src/java/org/apache/solr/search/SolrCache.java | 125 +-
.../java/org/apache/solr/search/SolrCacheBase.java | 54 +-
.../org/apache/solr/search/SolrCoreParser.java | 62 +-
.../apache/solr/search/SolrDocumentFetcher.java | 310 +-
.../org/apache/solr/search/SolrFieldCacheBean.java | 49 +-
.../org/apache/solr/search/SolrIndexSearcher.java | 1171 +++----
.../org/apache/solr/search/SolrQueryBuilder.java | 5 +-
.../org/apache/solr/search/SolrQueryParser.java | 6 +-
.../apache/solr/search/SolrQueryTimeoutImpl.java | 39 +-
.../org/apache/solr/search/SolrReturnFields.java | 291 +-
.../apache/solr/search/SolrSpanQueryBuilder.java | 5 +-
.../src/java/org/apache/solr/search/SortSpec.java | 89 +-
.../org/apache/solr/search/SortSpecParsing.java | 70 +-
.../org/apache/solr/search/SortedIntDocSet.java | 369 ++-
.../solr/search/SpatialBoxQParserPlugin.java | 5 +-
.../apache/solr/search/SpatialFilterQParser.java | 46 +-
.../solr/search/SpatialFilterQParserPlugin.java | 51 +-
.../org/apache/solr/search/SpatialOptions.java | 22 +-
.../src/java/org/apache/solr/search/StrParser.java | 512 ++--
.../apache/solr/search/SurroundQParserPlugin.java | 44 +-
.../apache/solr/search/SwitchQParserPlugin.java | 129 +-
.../java/org/apache/solr/search/SyntaxError.java | 2 +
.../org/apache/solr/search/TermQParserPlugin.java | 24 +-
.../org/apache/solr/search/TermsQParserPlugin.java | 125 +-
.../TextLogisticRegressionQParserPlugin.java | 83 +-
.../org/apache/solr/search/TopLevelJoinQuery.java | 129 +-
.../org/apache/solr/search/ValueSourceParser.java | 2069 +++++++------
.../java/org/apache/solr/search/WrappedQuery.java | 7 +-
.../org/apache/solr/search/XmlQParserPlugin.java | 77 +-
.../java/org/apache/solr/search/facet/AggUtil.java | 30 +-
.../apache/solr/search/facet/AggValueSource.java | 9 +-
.../java/org/apache/solr/search/facet/AvgAgg.java | 23 +-
.../org/apache/solr/search/facet/BlockJoin.java | 34 +-
.../org/apache/solr/search/facet/CountAgg.java | 3 +-
.../org/apache/solr/search/facet/CountValsAgg.java | 31 +-
.../org/apache/solr/search/facet/DocValuesAcc.java | 72 +-
.../org/apache/solr/search/facet/FacetBucket.java | 47 +-
.../org/apache/solr/search/facet/FacetContext.java | 18 +-
.../apache/solr/search/facet/FacetDebugInfo.java | 25 +-
.../org/apache/solr/search/facet/FacetField.java | 87 +-
.../apache/solr/search/facet/FacetFieldMerger.java | 77 +-
.../solr/search/facet/FacetFieldProcessor.java | 509 +--
.../search/facet/FacetFieldProcessorByArray.java | 72 +-
.../search/facet/FacetFieldProcessorByArrayDV.java | 142 +-
.../facet/FacetFieldProcessorByArrayUIF.java | 15 +-
.../FacetFieldProcessorByEnumTermsStream.java | 156 +-
.../search/facet/FacetFieldProcessorByHashDV.java | 398 +--
.../org/apache/solr/search/facet/FacetHeatmap.java | 324 +-
.../org/apache/solr/search/facet/FacetMerger.java | 42 +-
.../org/apache/solr/search/facet/FacetModule.java | 147 +-
.../org/apache/solr/search/facet/FacetParser.java | 289 +-
.../apache/solr/search/facet/FacetProcessor.java | 145 +-
.../org/apache/solr/search/facet/FacetQuery.java | 16 +-
.../org/apache/solr/search/facet/FacetRange.java | 1 -
.../apache/solr/search/facet/FacetRangeMerger.java | 82 +-
.../apache/solr/search/facet/FacetRangeParser.java | 10 +-
.../solr/search/facet/FacetRangeProcessor.java | 544 ++--
.../org/apache/solr/search/facet/FacetRequest.java | 199 +-
.../solr/search/facet/FacetRequestSorted.java | 18 +-
.../search/facet/FacetRequestSortedMerger.java | 197 +-
.../org/apache/solr/search/facet/FieldUtil.java | 51 +-
.../java/org/apache/solr/search/facet/HLLAgg.java | 38 +-
.../org/apache/solr/search/facet/LegacyFacet.java | 146 +-
.../org/apache/solr/search/facet/MinMaxAgg.java | 131 +-
.../org/apache/solr/search/facet/MissingAgg.java | 14 +-
.../apache/solr/search/facet/PercentileAgg.java | 97 +-
.../solr/search/facet/ReadOnlyCountSlotAcc.java | 4 +-
.../apache/solr/search/facet/RelatednessAgg.java | 399 +--
.../solr/search/facet/SimpleAggValueSource.java | 14 +-
.../solr/search/facet/SingletonDocIterator.java | 1 -
.../java/org/apache/solr/search/facet/SlotAcc.java | 382 +--
.../org/apache/solr/search/facet/StddevAgg.java | 37 +-
.../solr/search/facet/StrAggValueSource.java | 4 +-
.../java/org/apache/solr/search/facet/SumAgg.java | 28 +-
.../org/apache/solr/search/facet/SumsqAgg.java | 27 +-
.../apache/solr/search/facet/SweepCountAware.java | 123 +-
.../org/apache/solr/search/facet/SweepDISI.java | 23 +-
.../apache/solr/search/facet/SweepDocIterator.java | 14 +-
.../apache/solr/search/facet/UnInvertedField.java | 253 +-
.../solr/search/facet/UnInvertedFieldAcc.java | 29 +-
.../org/apache/solr/search/facet/UnionDISI.java | 20 +-
.../apache/solr/search/facet/UnionDocIterator.java | 18 +-
.../org/apache/solr/search/facet/UniqueAgg.java | 46 +-
.../apache/solr/search/facet/UniqueBlockAgg.java | 24 +-
.../solr/search/facet/UniqueBlockFieldAgg.java | 12 +-
.../solr/search/facet/UniqueBlockQueryAgg.java | 12 +-
.../solr/search/facet/UniqueMultiDvSlotAcc.java | 12 +-
.../search/facet/UniqueMultivaluedSlotAcc.java | 16 +-
.../search/facet/UniqueSinglevaluedSlotAcc.java | 16 +-
.../apache/solr/search/facet/UniqueSlotAcc.java | 30 +-
.../org/apache/solr/search/facet/VarianceAgg.java | 37 +-
.../org/apache/solr/search/facet/package-info.java | 5 +-
.../search/function/CollapseScoreFunction.java | 13 +-
.../solr/search/function/ConcatStringFunction.java | 10 +-
.../apache/solr/search/function/EqualFunction.java | 9 +-
.../solr/search/function/FieldNameValueSource.java | 18 +-
.../solr/search/function/FileFloatSource.java | 109 +-
.../solr/search/function/MultiStringFunction.java | 45 +-
.../solr/search/function/OrdFieldSource.java | 68 +-
.../search/function/ReverseOrdFieldSource.java | 62 +-
.../function/SolrComparisonBoolFunction.java | 5 +-
.../search/function/ValueSourceRangeFilter.java | 64 +-
.../distance/GeoDistValueSourceParser.java | 101 +-
.../search/function/distance/GeohashFunction.java | 22 +-
.../distance/GeohashHaversineFunction.java | 54 +-
.../function/distance/HaversineConstFunction.java | 39 +-
.../function/distance/HaversineFunction.java | 46 +-
.../distance/SquaredEuclideanFunction.java | 9 +-
.../function/distance/StringDistanceFunction.java | 23 +-
.../function/distance/VectorDistanceFunction.java | 58 +-
.../search/function/distance/package-info.java | 8 +-
.../apache/solr/search/function/package-info.java | 8 +-
.../org/apache/solr/search/grouping/Command.java | 18 +-
.../solr/search/grouping/CommandHandler.java | 94 +-
.../search/grouping/GroupingSpecification.java | 7 +-
.../search/grouping/collector/FilterCollector.java | 6 +-
.../search/grouping/collector/package-info.java | 7 +-
.../grouping/distributed/ShardRequestFactory.java | 8 +-
.../distributed/ShardResponseProcessor.java | 5 +-
.../distributed/command/GroupConverter.java | 51 +-
.../grouping/distributed/command/QueryCommand.java | 37 +-
.../distributed/command/QueryCommandResult.java | 4 +-
.../command/SearchGroupsFieldCommand.java | 28 +-
.../command/SearchGroupsFieldCommandResult.java | 8 +-
.../distributed/command/TopGroupsFieldCommand.java | 62 +-
.../grouping/distributed/command/package-info.java | 7 +-
.../search/grouping/distributed/package-info.java | 8 +-
.../requestfactory/SearchGroupsRequestFactory.java | 19 +-
.../StoredFieldsShardRequestFactory.java | 14 +-
.../TopGroupsShardRequestFactory.java | 47 +-
.../distributed/requestfactory/package-info.java | 8 +-
.../SearchGroupShardResponseProcessor.java | 56 +-
.../StoredFieldsShardResponseProcessor.java | 8 +-
.../TopGroupsShardResponseProcessor.java | 58 +-
.../responseprocessor/package-info.java | 8 +-
.../SearchGroupsResultTransformer.java | 63 +-
.../ShardResultTransformer.java | 13 +-
.../ShardResultTransformerUtils.java | 5 +-
.../TopGroupsResultTransformer.java | 100 +-
.../shardresultserializer/package-info.java | 8 +-
.../endresulttransformer/EndResultTransformer.java | 12 +-
.../GroupedEndResultTransformer.java | 22 +-
.../MainEndResultTransformer.java | 8 +-
.../SimpleEndResultTransformer.java | 7 +-
.../endresulttransformer/package-info.java | 6 +-
.../apache/solr/search/grouping/package-info.java | 5 +-
.../solr/search/join/BlockJoinChildQParser.java | 14 +-
.../search/join/BlockJoinChildQParserPlugin.java | 9 +-
.../solr/search/join/BlockJoinParentQParser.java | 41 +-
.../search/join/BlockJoinParentQParserPlugin.java | 14 +-
.../search/join/ChildFieldValueSourceParser.java | 83 +-
.../search/join/CrossCollectionJoinQParser.java | 38 +-
.../solr/search/join/CrossCollectionJoinQuery.java | 83 +-
.../apache/solr/search/join/FiltersQParser.java | 35 +-
.../solr/search/join/FiltersQParserPlugin.java | 4 +-
.../org/apache/solr/search/join/FrontierQuery.java | 19 +-
.../solr/search/join/GraphEdgeCollector.java | 35 +-
.../solr/search/join/GraphPointsCollector.java | 20 +-
.../solr/search/join/GraphQParserPlugin.java | 12 +-
.../org/apache/solr/search/join/GraphQuery.java | 188 +-
.../apache/solr/search/join/GraphQueryParser.java | 22 +-
.../apache/solr/search/join/HashRangeQParser.java | 3 +-
.../solr/search/join/HashRangeQParserPlugin.java | 8 +-
.../apache/solr/search/join/HashRangeQuery.java | 60 +-
.../join/MultiValueTermOrdinalCollector.java | 8 +-
.../solr/search/join/ScoreJoinQParserPlugin.java | 209 +-
.../apache/solr/search/join/ScoreModeParser.java | 33 +-
.../org/apache/solr/search/join/package-info.java | 8 +-
.../apache/solr/search/mlt/CloudMLTQParser.java | 55 +-
.../apache/solr/search/mlt/MLTQParserPlugin.java | 11 +-
.../apache/solr/search/mlt/SimpleMLTQParser.java | 65 +-
.../org/apache/solr/search/mlt/package-info.java | 8 +-
.../org/apache/solr/search/neural/KnnQParser.java | 135 +-
.../solr/search/neural/KnnQParserPlugin.java | 15 +-
.../apache/solr/search/neural/package-info.java | 8 +-
.../java/org/apache/solr/search/package-info.java | 9 +-
.../search/similarities/BM25SimilarityFactory.java | 18 +-
.../similarities/BooleanSimilarityFactory.java | 8 +-
.../similarities/ClassicSimilarityFactory.java | 26 +-
.../search/similarities/DFISimilarityFactory.java | 19 +-
.../search/similarities/DFRSimilarityFactory.java | 134 +-
.../search/similarities/IBSimilarityFactory.java | 62 +-
.../similarities/LMDirichletSimilarityFactory.java | 22 +-
.../LMJelinekMercerSimilarityFactory.java | 19 +-
.../similarities/SchemaSimilarityFactory.java | 68 +-
.../similarities/SweetSpotSimilarityFactory.java | 130 +-
.../solr/search/similarities/package-info.java | 12 +-
.../apache/solr/search/stats/CollectionStats.java | 19 +-
.../solr/search/stats/ExactSharedStatsCache.java | 48 +-
.../apache/solr/search/stats/ExactStatsCache.java | 181 +-
.../apache/solr/search/stats/LRUStatsCache.java | 119 +-
.../apache/solr/search/stats/LocalStatsCache.java | 9 +-
.../apache/solr/search/stats/LocalStatsSource.java | 15 +-
.../org/apache/solr/search/stats/StatsCache.java | 147 +-
.../org/apache/solr/search/stats/StatsSource.java | 19 +-
.../org/apache/solr/search/stats/StatsUtil.java | 60 +-
.../org/apache/solr/search/stats/TermStats.java | 19 +-
.../org/apache/solr/search/stats/package-info.java | 8 +-
.../apache/solr/security/AllowListUrlChecker.java | 15 +-
.../java/org/apache/solr/security/AuditEvent.java | 254 +-
.../apache/solr/security/AuditLoggerPlugin.java | 240 +-
.../apache/solr/security/AuthenticationPlugin.java | 109 +-
.../apache/solr/security/AuthorizationContext.java | 57 +-
.../apache/solr/security/AuthorizationPlugin.java | 5 +-
.../solr/security/AuthorizationResponse.java | 8 +-
.../solr/security/AutorizationEditOperation.java | 27 +-
.../org/apache/solr/security/BasicAuthPlugin.java | 84 +-
.../org/apache/solr/security/CertAuthPlugin.java | 46 +-
.../apache/solr/security/ConfigEditablePlugin.java | 19 +-
.../ExternalRoleRuleBasedAuthorizationPlugin.java | 14 +-
.../solr/security/HttpClientBuilderPlugin.java | 15 +-
.../org/apache/solr/security/MultiAuthPlugin.java | 87 +-
.../MultiAuthRuleBasedAuthorizationPlugin.java | 40 +-
.../solr/security/MultiDestinationAuditLogger.java | 50 +-
.../solr/security/PKIAuthenticationPlugin.java | 164 +-
.../java/org/apache/solr/security/Permission.java | 100 +-
.../solr/security/PermissionNameProvider.java | 19 +-
.../apache/solr/security/PrintWriterWrapper.java | 4 +-
.../org/apache/solr/security/PublicKeyHandler.java | 10 +-
.../security/RuleBasedAuthorizationPlugin.java | 11 +-
.../security/RuleBasedAuthorizationPluginBase.java | 153 +-
.../apache/solr/security/SecurityPluginHolder.java | 2 -
.../security/Sha256AuthenticationProvider.java | 38 +-
.../solr/security/SolrLogAuditLoggerPlugin.java | 60 +-
.../apache/solr/security/VerifiedUserRoles.java | 13 +-
.../org/apache/solr/security/package-info.java | 5 +-
.../org/apache/solr/servlet/BaseSolrFilter.java | 10 +-
.../org/apache/solr/servlet/BaseSolrServlet.java | 10 +-
.../solr/servlet/CheckLoggingConfiguration.java | 17 +-
.../apache/solr/servlet/CoreContainerProvider.java | 266 +-
.../apache/solr/servlet/DirectSolrConnection.java | 101 +-
.../java/org/apache/solr/servlet/HttpSolrCall.java | 440 +--
.../apache/solr/servlet/LoadAdminUiServlet.java | 51 +-
.../java/org/apache/solr/servlet/PathExcluder.java | 4 +-
.../org/apache/solr/servlet/QueryRateLimiter.java | 31 +-
.../org/apache/solr/servlet/RateLimitManager.java | 43 +-
.../org/apache/solr/servlet/RedirectServlet.java | 34 +-
.../apache/solr/servlet/RequestRateLimiter.java | 44 +-
.../org/apache/solr/servlet/ResponseUtils.java | 28 +-
.../solr/servlet/ServletInputStreamWrapper.java | 16 +-
.../solr/servlet/ServletOutputStreamWrapper.java | 13 +-
.../java/org/apache/solr/servlet/ServletUtils.java | 162 +-
.../solr/servlet/SolrAuthenticationException.java | 3 +-
.../apache/solr/servlet/SolrDispatchFilter.java | 196 +-
.../apache/solr/servlet/SolrRequestParsers.java | 636 ++--
.../solr/servlet/cache/HttpCacheHeaderUtil.java | 190 +-
.../java/org/apache/solr/servlet/cache/Method.java | 5 +-
.../apache/solr/servlet/cache/package-info.java | 8 +-
.../java/org/apache/solr/servlet/package-info.java | 8 +-
.../solr/spelling/AbstractLuceneSpellChecker.java | 107 +-
.../solr/spelling/ConjunctionSolrSpellChecker.java | 119 +-
.../solr/spelling/DirectSolrSpellChecker.java | 129 +-
.../solr/spelling/FileBasedSpellChecker.java | 56 +-
.../solr/spelling/IndexBasedSpellChecker.java | 27 +-
.../apache/solr/spelling/PossibilityIterator.java | 134 +-
.../org/apache/solr/spelling/QueryConverter.java | 60 +-
.../java/org/apache/solr/spelling/ResultEntry.java | 15 +-
.../org/apache/solr/spelling/SolrSpellChecker.java | 69 +-
.../apache/solr/spelling/SpellCheckCollation.java | 3 +-
.../apache/solr/spelling/SpellCheckCollator.java | 123 +-
.../apache/solr/spelling/SpellCheckCorrection.java | 3 +-
.../org/apache/solr/spelling/SpellingOptions.java | 76 +-
.../solr/spelling/SpellingQueryConverter.java | 151 +-
.../org/apache/solr/spelling/SpellingResult.java | 40 +-
.../solr/spelling/SuggestQueryConverter.java | 4 +-
.../src/java/org/apache/solr/spelling/Token.java | 96 +-
.../solr/spelling/WordBreakSolrSpellChecker.java | 246 +-
.../org/apache/solr/spelling/package-info.java | 11 +-
.../solr/spelling/suggest/DictionaryFactory.java | 22 +-
.../suggest/DocumentDictionaryFactory.java | 16 +-
.../DocumentExpressionDictionaryFactory.java | 42 +-
.../spelling/suggest/FileDictionaryFactory.java | 34 +-
.../suggest/HighFrequencyDictionaryFactory.java | 25 +-
.../solr/spelling/suggest/LookupFactory.java | 24 +-
.../solr/spelling/suggest/SolrSuggester.java | 150 +-
.../apache/solr/spelling/suggest/Suggester.java | 89 +-
.../solr/spelling/suggest/SuggesterOptions.java | 23 +-
.../solr/spelling/suggest/SuggesterParams.java | 52 +-
.../solr/spelling/suggest/SuggesterResult.java | 37 +-
.../suggest/fst/AnalyzingInfixLookupFactory.java | 121 +-
.../suggest/fst/AnalyzingLookupFactory.java | 114 +-
.../suggest/fst/BlendedInfixLookupFactory.java | 124 +-
.../spelling/suggest/fst/FSTLookupFactory.java | 42 +-
.../suggest/fst/FreeTextLookupFactory.java | 63 +-
.../spelling/suggest/fst/FuzzyLookupFactory.java | 162 +-
.../spelling/suggest/fst/WFSTLookupFactory.java | 19 +-
.../solr/spelling/suggest/fst/package-info.java | 10 +-
.../suggest/jaspell/JaspellLookupFactory.java | 6 +-
.../spelling/suggest/jaspell/package-info.java | 9 +-
.../apache/solr/spelling/suggest/package-info.java | 11 +-
.../spelling/suggest/tst/TSTLookupFactory.java | 4 +-
.../solr/spelling/suggest/tst/package-info.java | 9 +-
.../org/apache/solr/uninverting/DocTermOrds.java | 412 +--
.../org/apache/solr/uninverting/FieldCache.java | 517 ++--
.../apache/solr/uninverting/FieldCacheImpl.java | 485 +--
.../apache/solr/uninverting/UninvertingReader.java | 272 +-
.../org/apache/solr/uninverting/package-info.java | 4 +-
.../org/apache/solr/update/AddUpdateCommand.java | 162 +-
.../java/org/apache/solr/update/CommitTracker.java | 103 +-
.../apache/solr/update/CommitUpdateCommand.java | 33 +-
.../apache/solr/update/DefaultSolrCoreState.java | 228 +-
.../apache/solr/update/DeleteByQueryWrapper.java | 36 +-
.../apache/solr/update/DeleteUpdateCommand.java | 24 +-
.../apache/solr/update/DirectUpdateHandler2.java | 378 ++-
.../org/apache/solr/update/DocumentBuilder.java | 253 +-
.../org/apache/solr/update/IndexFingerprint.java | 46 +-
.../org/apache/solr/update/LoggingInfoStream.java | 9 +-
.../org/apache/solr/update/MemOutputStream.java | 12 +-
.../apache/solr/update/MergeIndexesCommand.java | 11 +-
.../src/java/org/apache/solr/update/PeerSync.java | 471 +--
.../org/apache/solr/update/PeerSyncWithLeader.java | 137 +-
.../apache/solr/update/RollbackUpdateCommand.java | 1 -
.../org/apache/solr/update/SolrCmdDistributor.java | 331 +-
.../java/org/apache/solr/update/SolrCoreState.java | 126 +-
.../org/apache/solr/update/SolrIndexConfig.java | 159 +-
.../org/apache/solr/update/SolrIndexSplitter.java | 241 +-
.../org/apache/solr/update/SolrIndexWriter.java | 209 +-
.../org/apache/solr/update/SplitIndexCommand.java | 24 +-
.../apache/solr/update/StreamingSolrClients.java | 27 +-
.../org/apache/solr/update/TimedVersionBucket.java | 19 +-
.../org/apache/solr/update/TransactionLog.java | 297 +-
.../java/org/apache/solr/update/UpdateCommand.java | 31 +-
.../java/org/apache/solr/update/UpdateHandler.java | 81 +-
.../src/java/org/apache/solr/update/UpdateLog.java | 894 +++---
.../org/apache/solr/update/UpdateShardHandler.java | 137 +-
.../solr/update/UpdateShardHandlerConfig.java | 21 +-
.../java/org/apache/solr/update/VersionBucket.java | 28 +-
.../java/org/apache/solr/update/VersionInfo.java | 144 +-
.../java/org/apache/solr/update/package-info.java | 8 +-
...AbstractDefaultValueUpdateProcessorFactory.java | 52 +-
.../AddSchemaFieldsUpdateProcessorFactory.java | 365 ++-
...llValuesOrNoneFieldMutatingUpdateProcessor.java | 92 +-
.../processor/AtomicUpdateDocumentMerger.java | 360 ++-
.../processor/AtomicUpdateProcessorFactory.java | 101 +-
.../processor/ClassificationUpdateProcessor.java | 45 +-
.../ClassificationUpdateProcessorFactory.java | 54 +-
.../ClassificationUpdateProcessorParams.java | 3 +-
.../CloneFieldUpdateProcessorFactory.java | 403 ++-
.../ConcatFieldUpdateProcessorFactory.java | 66 +-
.../CountFieldValuesUpdateProcessorFactory.java | 71 +-
.../DefaultValueUpdateProcessorFactory.java | 38 +-
.../processor/DistributedUpdateProcessor.java | 594 ++--
.../DistributedUpdateProcessorFactory.java | 38 +-
.../processor/DistributedZkUpdateProcessor.java | 644 ++--
.../DistributingUpdateProcessorFactory.java | 17 +-
.../DocBasedVersionConstraintsProcessor.java | 271 +-
...DocBasedVersionConstraintsProcessorFactory.java | 204 +-
.../DocExpirationUpdateProcessorFactory.java | 365 ++-
.../FieldLengthUpdateProcessorFactory.java | 53 +-
.../processor/FieldMutatingUpdateProcessor.java | 210 +-
.../FieldMutatingUpdateProcessorFactory.java | 184 +-
.../FieldNameMutatingUpdateProcessorFactory.java | 32 +-
.../FieldValueMutatingUpdateProcessor.java | 64 +-
.../FieldValueSubsetUpdateProcessorFactory.java | 42 +-
.../FirstFieldValueUpdateProcessorFactory.java | 32 +-
.../HTMLStripFieldUpdateProcessorFactory.java | 77 +-
...IgnoreCommitOptimizeUpdateProcessorFactory.java | 38 +-
.../IgnoreFieldUpdateProcessorFactory.java | 39 +-
.../IgnoreLargeDocumentProcessorFactory.java | 46 +-
.../LastFieldValueUpdateProcessorFactory.java | 46 +-
.../processor/LogUpdateProcessorFactory.java | 71 +-
.../solr/update/processor/Lookup3Signature.java | 19 +-
.../apache/solr/update/processor/MD5Signature.java | 22 +-
.../MaxFieldValueUpdateProcessorFactory.java | 41 +-
.../MinFieldValueUpdateProcessorFactory.java | 41 +-
.../processor/NestedUpdateProcessorFactory.java | 38 +-
.../NoOpDistributingUpdateProcessorFactory.java | 29 +-
.../ParseBooleanFieldUpdateProcessorFactory.java | 73 +-
.../ParseDateFieldUpdateProcessorFactory.java | 151 +-
.../ParseDoubleFieldUpdateProcessorFactory.java | 98 +-
.../ParseFloatFieldUpdateProcessorFactory.java | 92 +-
.../ParseIntFieldUpdateProcessorFactory.java | 88 +-
.../ParseLongFieldUpdateProcessorFactory.java | 91 +-
.../ParseNumericFieldUpdateProcessorFactory.java | 35 +-
.../PreAnalyzedUpdateProcessorFactory.java | 98 +-
.../processor/RegexReplaceProcessorFactory.java | 87 +-
.../update/processor/RegexpBoostProcessor.java | 41 +-
.../processor/RegexpBoostProcessorFactory.java | 34 +-
.../RemoveBlankFieldUpdateProcessorFactory.java | 46 +-
.../processor/RoutedAliasUpdateProcessor.java | 109 +-
.../processor/RunUpdateProcessorFactory.java | 25 +-
.../apache/solr/update/processor/Signature.java | 10 +-
.../processor/SignatureUpdateProcessorFactory.java | 103 +-
.../processor/SimpleUpdateProcessorFactory.java | 25 +-
.../SkipExistingDocumentsProcessorFactory.java | 144 +-
.../processor/TemplateUpdateProcessorFactory.java | 65 +-
.../update/processor/TextProfileSignature.java | 61 +-
.../processor/TimestampUpdateProcessorFactory.java | 35 +-
.../update/processor/TolerantUpdateProcessor.java | 217 +-
.../processor/TolerantUpdateProcessorFactory.java | 121 +-
.../processor/TrimFieldUpdateProcessorFactory.java | 44 +-
.../TruncateFieldUpdateProcessorFactory.java | 81 +-
.../update/processor/URLClassifyProcessor.java | 110 +-
.../processor/URLClassifyProcessorFactory.java | 12 +-
.../processor/UUIDUpdateProcessorFactory.java | 52 +-
.../UniqFieldsUpdateProcessorFactory.java | 34 +-
.../update/processor/UpdateRequestProcessor.java | 42 +-
.../processor/UpdateRequestProcessorChain.java | 221 +-
.../processor/UpdateRequestProcessorFactory.java | 23 +-
.../apache/solr/update/processor/package-info.java | 8 +-
.../org/apache/solr/util/AdjustableSemaphore.java | 29 +-
.../java/org/apache/solr/util/BoundedTreeSet.java | 14 +-
solr/core/src/java/org/apache/solr/util/CLIO.java | 3 +-
.../org/apache/solr/util/ConcurrentLRUCache.java | 374 ++-
.../src/java/org/apache/solr/util/CryptoKeys.java | 116 +-
.../java/org/apache/solr/util/DOMConfigNode.java | 31 +-
.../java/org/apache/solr/util/DataConfigNode.java | 52 +-
.../java/org/apache/solr/util/DateMathParser.java | 312 +-
.../java/org/apache/solr/util/DistanceUnits.java | 57 +-
.../src/java/org/apache/solr/util/DynamicMap.java | 13 +-
.../src/java/org/apache/solr/util/ExportTool.java | 336 +-
.../src/java/org/apache/solr/util/FileUtils.java | 37 +-
.../java/org/apache/solr/util/FloatConsumer.java | 7 +-
.../src/java/org/apache/solr/util/IOFunction.java | 1 +
.../src/java/org/apache/solr/util/IdUtils.java | 22 +-
.../org/apache/solr/util/IntFloatDynamicMap.java | 14 +-
.../org/apache/solr/util/IntIntDynamicMap.java | 15 +-
.../org/apache/solr/util/IntLongDynamicMap.java | 15 +-
.../src/java/org/apache/solr/util/JmxUtil.java | 18 +-
.../java/org/apache/solr/util/LongIterator.java | 25 +-
.../org/apache/solr/util/LongPriorityQueue.java | 105 +-
.../src/java/org/apache/solr/util/LongSet.java | 22 +-
.../src/java/org/apache/solr/util/MapListener.java | 4 +-
.../apache/solr/util/MockSearchableSolrClient.java | 42 +-
.../src/java/org/apache/solr/util/ModuleUtils.java | 50 +-
.../src/java/org/apache/solr/util/NumberUtils.java | 55 +-
.../java/org/apache/solr/util/OrderedExecutor.java | 33 +-
.../src/java/org/apache/solr/util/PackageTool.java | 311 +-
.../java/org/apache/solr/util/PayloadUtils.java | 43 +-
.../java/org/apache/solr/util/PivotListEntry.java | 31 +-
.../src/java/org/apache/solr/util/PrimUtils.java | 22 +-
.../apache/solr/util/PropertiesInputStream.java | 10 +-
.../apache/solr/util/PropertiesOutputStream.java | 8 +-
.../core/src/java/org/apache/solr/util/RTimer.java | 21 +-
.../src/java/org/apache/solr/util/RTimerTree.java | 37 +-
.../org/apache/solr/util/RecordingJSONParser.java | 8 +-
.../java/org/apache/solr/util/RedactionUtils.java | 21 +-
.../src/java/org/apache/solr/util/RefCounted.java | 16 +-
.../java/org/apache/solr/util/RegexFileFilter.java | 9 +-
.../java/org/apache/solr/util/SafeXMLParsing.java | 83 +-
.../java/org/apache/solr/util/SimplePostTool.java | 709 +++--
.../src/java/org/apache/solr/util/SolrCLI.java | 3239 +++++++++++---------
.../solr/util/SolrJacksonAnnotationInspector.java | 33 +-
.../java/org/apache/solr/util/SolrLogPostTool.java | 156 +-
.../java/org/apache/solr/util/SolrPluginUtils.java | 670 ++--
.../src/java/org/apache/solr/util/SolrVersion.java | 41 +-
.../java/org/apache/solr/util/SpatialUtils.java | 76 +-
.../org/apache/solr/util/StartupLoggingUtils.java | 92 +-
.../org/apache/solr/util/SystemIdResolver.java | 89 +-
.../java/org/apache/solr/util/TestInjection.java | 256 +-
.../src/java/org/apache/solr/util/TimeOut.java | 17 +-
.../java/org/apache/solr/util/TimeZoneUtils.java | 43 +-
.../java/org/apache/solr/util/VersionedFile.java | 34 +-
.../util/circuitbreaker/CPUCircuitBreaker.java | 28 +-
.../solr/util/circuitbreaker/CircuitBreaker.java | 35 +-
.../util/circuitbreaker/CircuitBreakerManager.java | 47 +-
.../util/circuitbreaker/MemoryCircuitBreaker.java | 34 +-
.../solr/util/circuitbreaker/package-info.java | 6 +-
.../solr/util/configuration/SSLConfigurations.java | 34 +-
.../configuration/SSLConfigurationsFactory.java | 11 +-
.../util/configuration/SSLCredentialProvider.java | 5 +-
.../SSLCredentialProviderFactory.java | 43 +-
.../solr/util/configuration/package-info.java | 8 +-
.../providers/AbstractSSLCredentialProvider.java | 35 +-
.../providers/EnvSSLCredentialProvider.java | 34 +-
.../providers/SysPropSSLCredentialProvider.java | 4 +-
.../util/configuration/providers/package-info.java | 8 +-
.../hll/BigEndianAscendingWordDeserializer.java | 278 +-
.../util/hll/BigEndianAscendingWordSerializer.java | 281 +-
.../src/java/org/apache/solr/util/hll/BitUtil.java | 94 +-
.../java/org/apache/solr/util/hll/BitVector.java | 476 +--
.../src/java/org/apache/solr/util/hll/HLL.java | 2012 ++++++------
.../java/org/apache/solr/util/hll/HLLMetadata.java | 230 +-
.../src/java/org/apache/solr/util/hll/HLLType.java | 14 +-
.../src/java/org/apache/solr/util/hll/HLLUtil.java | 362 +--
.../org/apache/solr/util/hll/IHLLMetadata.java | 83 +-
.../org/apache/solr/util/hll/ISchemaVersion.java | 108 +-
.../apache/solr/util/hll/IWordDeserializer.java | 37 +-
.../org/apache/solr/util/hll/IWordSerializer.java | 31 +-
.../java/org/apache/solr/util/hll/NumberUtil.java | 288 +-
.../org/apache/solr/util/hll/SchemaVersionOne.java | 259 +-
.../apache/solr/util/hll/SerializationUtil.java | 460 ++-
.../org/apache/solr/util/hll/package-info.java | 10 +-
.../java/org/apache/solr/util/package-info.java | 8 +-
.../solr/util/plugin/AbstractPluginLoader.java | 227 +-
.../solr/util/plugin/MapInitializedPlugin.java | 3 +-
.../apache/solr/util/plugin/MapPluginLoader.java | 22 +-
.../util/plugin/NamedListInitializedPlugin.java | 5 +-
.../solr/util/plugin/NamedListPluginLoader.java | 18 +-
.../solr/util/plugin/PluginInfoInitialized.java | 4 +-
.../org/apache/solr/util/plugin/SolrCoreAware.java | 5 +-
.../org/apache/solr/util/plugin/package-info.java | 7 +-
.../util/stats/HttpClientMetricNameStrategy.java | 5 +-
.../stats/InstrumentedHttpListenerFactory.java | 25 +-
.../stats/InstrumentedHttpRequestExecutor.java | 50 +-
...rumentedPoolingHttpClientConnectionManager.java | 28 +-
.../org/apache/solr/util/stats/MetricUtils.java | 779 +++--
.../org/apache/solr/util/stats/package-info.java | 8 +-
.../solr/util/tracing/HttpServletCarrier.java | 89 +-
.../solr/util/tracing/SolrRequestCarrier.java | 7 +-
.../org/apache/solr/util/tracing/TraceUtils.java | 3 +-
.../org/apache/solr/util/tracing/package-info.java | 6 +-
.../org/apache/solr/TestDistributedSearch.java | 18 +-
.../org/apache/solr/cloud/ShardRoutingTest.java | 63 +-
.../solr/cloud/TestDistribDocBasedVersion.java | 56 +-
.../org/apache/solr/cloud/TestHashPartitioner.java | 56 +-
.../solr/search/facet/TestJsonFacetRefinement.java | 22 +-
.../apache/solr/search/facet/TestJsonFacets.java | 17 +-
.../solr/update/TestIndexingPerformance.java | 30 +-
.../src/test/org/apache/solr/util/BitSetPerf.java | 8 +-
.../solr/hdfs/store/blockcache/BlockCacheTest.java | 12 +-
.../solr/client/solrj/impl/XMLResponseParser.java | 10 +-
.../src/java/org/apache/solr/common/util/Hash.java | 44 +-
.../apache/solr/common/util/JsonTextWriter.java | 10 +-
.../java/org/apache/solr/common/util/StrUtils.java | 10 +-
solr/solrj/src/java/org/noggit/CharArr.java | 28 +-
solr/solrj/src/java/org/noggit/JSONParser.java | 138 +-
.../solr/client/solrj/SolrExceptionTest.java | 10 +-
.../solr/client/solrj/TestSolrJErrorHandling.java | 58 +-
.../test/org/apache/solr/common/util/TestHash.java | 24 +-
.../cloud/AbstractBasicDistributedZkTestBase.java | 18 +-
.../solr/cloud/AbstractFullDistribZkTestBase.java | 12 +-
1396 files changed, 83105 insertions(+), 67444 deletions(-)
diff --git a/gradle/validation/spotless.gradle b/gradle/validation/spotless.gradle
index 7b7a82f..6ee640e 100644
--- a/gradle/validation/spotless.gradle
+++ b/gradle/validation/spotless.gradle
@@ -45,7 +45,13 @@ configure(project(":solr").subprojects) { prj ->
// Exclude certain files (generated ones, mostly).
switch (project.path) {
case ":solr:core":
- targetExclude "src/java/**/*.java"
+ // These are excluded since they are generated by javacc
+ targetExclude "src/java/org/apache/solr/parser/ParseException.java",
+ "src/java/org/apache/solr/parser/QueryParser.java",
+ "src/java/org/apache/solr/parser/QueryParserConstants.java",
+ "src/java/org/apache/solr/parser/QueryParserTokenManager.java",
+ "src/java/org/apache/solr/parser/Token.java",
+ "src/java/org/apache/solr/parser/TokenMgrError.java"
break
case ":solr:solrj":
targetExclude "src/test/org/apache/solr/client/solrj/io/stream/*.java"
diff --git a/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizer.java b/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizer.java
index 1361283..41fe0ad 100644
--- a/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizer.java
+++ b/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizer.java
@@ -17,7 +17,6 @@
package org.apache.solr.analysis;
import java.io.IOException;
-
import org.apache.lucene.analysis.CharacterUtils;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
@@ -26,46 +25,41 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.util.AttributeFactory;
/**
- * LowerCaseTokenizer performs the function of LetterTokenizer
- * and LowerCaseFilter together. It divides text at non-letters and converts
- * them to lower case. While it is functionally equivalent to the combination
- * of LetterTokenizer and LowerCaseFilter, there is a performance advantage
- * to doing the two tasks at once, hence this (redundant) implementation.
- * <P>
- * Note: this does a decent job for most European languages, but does a terrible
- * job for some Asian languages, where words are not separated by spaces.
- * </p>
+ * LowerCaseTokenizer performs the function of LetterTokenizer and LowerCaseFilter together. It
+ * divides text at non-letters and converts them to lower case. While it is functionally equivalent
+ * to the combination of LetterTokenizer and LowerCaseFilter, there is a performance advantage to
+ * doing the two tasks at once, hence this (redundant) implementation.
+ *
+ * <p>Note: this does a decent job for most European languages, but does a terrible job for some
+ * Asian languages, where words are not separated by spaces.
*
* @deprecated Use {@link LetterTokenizer} and {@link org.apache.lucene.analysis.LowerCaseFilter}
*/
@Deprecated
public final class LowerCaseTokenizer extends Tokenizer {
- /**
- * Construct a new LowerCaseTokenizer.
- */
+ /** Construct a new LowerCaseTokenizer. */
public LowerCaseTokenizer() {
this.maxTokenLen = DEFAULT_MAX_WORD_LEN;
}
/**
- * Construct a new LowerCaseTokenizer using a given
- * {@link org.apache.lucene.util.AttributeFactory}.
+ * Construct a new LowerCaseTokenizer using a given {@link
+ * org.apache.lucene.util.AttributeFactory}.
*
- * @param factory
- * the attribute factory to use for this {@link Tokenizer}
+ * @param factory the attribute factory to use for this {@link Tokenizer}
*/
public LowerCaseTokenizer(AttributeFactory factory) {
this(factory, DEFAULT_MAX_WORD_LEN);
}
/**
- * Construct a new LowerCaseTokenizer using a given
- * {@link org.apache.lucene.util.AttributeFactory}.
+ * Construct a new LowerCaseTokenizer using a given {@link
+ * org.apache.lucene.util.AttributeFactory}.
*
* @param factory the attribute factory to use for this {@link Tokenizer}
- * @param maxTokenLen maximum token length the tokenizer will emit.
- * Must be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024)
+ * @param maxTokenLen maximum token length the tokenizer will emit. Must be greater than 0 and
+ * less than MAX_TOKEN_LENGTH_LIMIT (1024*1024)
* @throws IllegalArgumentException if maxTokenLen is invalid.
*/
public LowerCaseTokenizer(AttributeFactory factory, int maxTokenLen) {
@@ -81,7 +75,8 @@ public final class LowerCaseTokenizer extends Tokenizer {
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
- private final CharacterUtils.CharacterBuffer ioBuffer = CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE);
+ private final CharacterUtils.CharacterBuffer ioBuffer =
+ CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE);
@Override
public final boolean incrementToken() throws IOException {
@@ -106,26 +101,30 @@ public final class LowerCaseTokenizer extends Tokenizer {
dataLen = ioBuffer.getLength();
bufferIndex = 0;
}
- // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone
+ // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based
+ // methods are gone
final int c = Character.codePointAt(ioBuffer.getBuffer(), bufferIndex, ioBuffer.getLength());
final int charCount = Character.charCount(c);
bufferIndex += charCount;
- if (Character.isLetter(c)) { // if it's a token char
- if (length == 0) { // start of token
+ if (Character.isLetter(c)) { // if it's a token char
+ if (length == 0) { // start of token
assert start == -1;
start = offset + bufferIndex - charCount;
end = start;
- } else if (length >= buffer.length-1) { // check if a supplementary could run out of bounds
- buffer = termAtt.resizeBuffer(2+length); // make sure a supplementary fits in the buffer
+ } else if (length
+ >= buffer.length - 1) { // check if a supplementary could run out of bounds
+ buffer = termAtt.resizeBuffer(2 + length); // make sure a supplementary fits in the buffer
}
end += charCount;
- length += Character.toChars(Character.toLowerCase(c), buffer, length); // buffer it, normalized
- if (length >= maxTokenLen) { // buffer overflow! make sure to check for >= surrogate pair could break == test
+ // buffer it, normalized
+ length += Character.toChars(Character.toLowerCase(c), buffer, length);
+ // buffer overflow! make sure to check for >= surrogate pair could break == test
+ if (length >= maxTokenLen) {
break;
}
- } else if (length > 0) { // at non-Letter w/ chars
- break; // return 'em
+ } else if (length > 0) { // at non-Letter w/ chars
+ break; // return 'em
}
}
@@ -133,7 +132,6 @@ public final class LowerCaseTokenizer extends Tokenizer {
assert start != -1;
offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(end));
return true;
-
}
@Override
@@ -152,5 +150,4 @@ public final class LowerCaseTokenizer extends Tokenizer {
finalOffset = 0;
ioBuffer.reset(); // make sure to reset the IO buffer!!
}
-
}
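For readers following the reformatted tokenizer above: it is consumed through the standard Lucene Tokenizer contract (setReader/reset/incrementToken/end/close). A minimal, hedged sketch, not part of this commit; the input string and the maxTokenLen of 255 are arbitrary:

    import java.io.StringReader;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.AttributeFactory;
    import org.apache.solr.analysis.LowerCaseTokenizer;

    public class LowerCaseTokenizerDemo {
      public static void main(String[] args) throws Exception {
        // Splits on non-letters and lower-cases in a single pass.
        try (LowerCaseTokenizer tok =
            new LowerCaseTokenizer(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, 255)) {
          tok.setReader(new StringReader("Hello SOLR-Users 42"));
          CharTermAttribute term = tok.addAttribute(CharTermAttribute.class);
          tok.reset();
          while (tok.incrementToken()) {
            System.out.println(term.toString()); // prints: hello, solr, users
          }
          tok.end();
        }
      }
    }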
diff --git a/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizerFactory.java b/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizerFactory.java
index 9dfbc56..66ce856 100644
--- a/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizerFactory.java
+++ b/solr/core/src/java/org/apache/solr/analysis/LowerCaseTokenizerFactory.java
@@ -16,31 +16,34 @@
*/
package org.apache.solr.analysis;
-import java.util.Map;
+import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
+import java.util.Map;
+import org.apache.lucene.analysis.TokenizerFactory;
import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
import org.apache.lucene.analysis.util.CharTokenizer;
-import org.apache.lucene.analysis.TokenizerFactory;
import org.apache.lucene.util.AttributeFactory;
-import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
-
/**
* Factory for {@link LowerCaseTokenizer}.
+ *
* <pre class="prettyprint">
* <fieldType name="text_lwrcase" class="solr.TextField" positionIncrementGap="100">
* <analyzer>
* <tokenizer class="solr.LowerCaseTokenizerFactory" maxTokenLen="256"/>
* </analyzer>
* </fieldType></pre>
- * <p>
- * Options:
+ *
+ * <p>Options:
+ *
* <ul>
- * <li>maxTokenLen: max token length, should be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024).
- * It is rare to need to change this
- * else {@link CharTokenizer}::DEFAULT_MAX_WORD_LEN</li>
+ * <li>maxTokenLen: max token length, should be greater than 0 and less than
+ * MAX_TOKEN_LENGTH_LIMIT (1024*1024). It is rare to need to change this else {@link
+ * CharTokenizer}::DEFAULT_MAX_WORD_LEN
* </ul>
- * @deprecated Use {@link org.apache.lucene.analysis.core.LetterTokenizerFactory} and {@link LowerCaseFilterFactory}
+ *
+ * @deprecated Use {@link org.apache.lucene.analysis.core.LetterTokenizerFactory} and {@link
+ * LowerCaseFilterFactory}
* @lucene.spi {@value #NAME}
*/
@Deprecated
@@ -51,14 +54,16 @@ public class LowerCaseTokenizerFactory extends TokenizerFactory {
private final int maxTokenLen;
- /**
- * Creates a new LowerCaseTokenizerFactory
- */
+ /** Creates a new LowerCaseTokenizerFactory */
public LowerCaseTokenizerFactory(Map<String, String> args) {
super(args);
maxTokenLen = getInt(args, "maxTokenLen", CharTokenizer.DEFAULT_MAX_WORD_LEN);
if (maxTokenLen > MAX_TOKEN_LENGTH_LIMIT || maxTokenLen <= 0) {
- throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + maxTokenLen);
+ throw new IllegalArgumentException(
+ "maxTokenLen must be greater than 0 and less than "
+ + MAX_TOKEN_LENGTH_LIMIT
+ + " passed: "
+ + maxTokenLen);
}
if (!args.isEmpty()) {
throw new IllegalArgumentException("Unknown parameters: " + args);
@@ -74,5 +79,4 @@ public class LowerCaseTokenizerFactory extends TokenizerFactory {
public LowerCaseTokenizer create(AttributeFactory factory) {
return new LowerCaseTokenizer(factory, maxTokenLen);
}
-
}
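The maxTokenLen option described in the factory javadoc above can also be exercised programmatically; a small fragment (assumes the usual imports and a surrounding method body, the value 256 is illustrative):

    Map<String, String> args = new HashMap<>();
    args.put("maxTokenLen", "256"); // must be > 0 and < MAX_TOKEN_LENGTH_LIMIT
    LowerCaseTokenizerFactory factory = new LowerCaseTokenizerFactory(args); // consumes args
    Tokenizer tokenizer = factory.create(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);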
diff --git a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java b/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java
index 37fd95b..5512284 100644
--- a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java
+++ b/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java
@@ -15,27 +15,26 @@
* limitations under the License.
*/
package org.apache.solr.analysis;
-import java.io.IOException;
+import java.io.IOException;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
/**
- * This class produces a special form of reversed tokens, suitable for
- * better handling of leading wildcards. Tokens from the input TokenStream
- * are reversed and prepended with a special "reversed" marker character.
- * If <code>withOriginal</code> argument is <code>true</code> then first the
- * original token is returned, and then the reversed token (with
- * <code>positionIncrement == 0</code>) is returned. Otherwise only reversed
- * tokens are returned.
- * <p>Note: this filter doubles the number of tokens in the input stream when
- * <code>withOriginal == true</code>, which proportionally increases the size
- * of postings and term dictionary in the index.
+ * This class produces a special form of reversed tokens, suitable for better handling of leading
+ * wildcards. Tokens from the input TokenStream are reversed and prepended with a special "reversed"
+ * marker character. If <code>withOriginal</code> argument is <code>true</code> then first the
+ * original token is returned, and then the reversed token (with <code>positionIncrement == 0</code>
+ * ) is returned. Otherwise only reversed tokens are returned.
+ *
+ * <p>Note: this filter doubles the number of tokens in the input stream when <code>
+ * withOriginal == true</code>, which proportionally increases the size of postings and term
+ * dictionary in the index.
*/
public final class ReversedWildcardFilter extends TokenFilter {
-
+
private final boolean withOriginal;
private final char markerChar;
private final CharTermAttribute termAtt;
@@ -52,7 +51,7 @@ public final class ReversedWildcardFilter extends TokenFilter {
@Override
public boolean incrementToken() throws IOException {
- if( save != null ) {
+ if (save != null) {
// clearAttributes(); // not currently necessary
restoreState(save);
save = null;
@@ -63,34 +62,32 @@ public final class ReversedWildcardFilter extends TokenFilter {
// pass through zero-length terms
int oldLen = termAtt.length();
- if (oldLen ==0) return true;
+ if (oldLen == 0) return true;
int origOffset = posAtt.getPositionIncrement();
- if (withOriginal == true){
+ if (withOriginal == true) {
posAtt.setPositionIncrement(0);
save = captureState();
}
- char [] buffer = termAtt.resizeBuffer(oldLen + 1);
+ char[] buffer = termAtt.resizeBuffer(oldLen + 1);
buffer[oldLen] = markerChar;
reverse(buffer, 0, oldLen + 1);
posAtt.setPositionIncrement(origOffset);
- termAtt.copyBuffer(buffer, 0, oldLen +1);
+ termAtt.copyBuffer(buffer, 0, oldLen + 1);
return true;
}
-
/**
- * Partially reverses the given input buffer in-place from the given offset
- * up to the given length, keeping surrogate pairs in the correct (non-reversed) order.
+ * Partially reverses the given input buffer in-place from the given offset up to the given
+ * length, keeping surrogate pairs in the correct (non-reversed) order.
+ *
* @param buffer the input char array to reverse
* @param start the offset from where to reverse the buffer
- * @param len the length in the buffer up to where the
- * buffer should be reversed
+ * @param len the length in the buffer up to where the buffer should be reversed
*/
public static void reverse(final char[] buffer, final int start, final int len) {
/* modified version of Apache Harmony AbstractStringBuilder reverse0() */
- if (len < 2)
- return;
+ if (len < 2) return;
int end = (start + len) - 1;
char frontHigh = buffer[start];
char endLow = buffer[end];
@@ -99,14 +96,12 @@ public final class ReversedWildcardFilter extends TokenFilter {
for (int i = start; i < mid; ++i, --end) {
final char frontLow = buffer[i + 1];
final char endHigh = buffer[end - 1];
- final boolean surAtFront = allowFrontSur
- && Character.isSurrogatePair(frontHigh, frontLow);
+ final boolean surAtFront = allowFrontSur && Character.isSurrogatePair(frontHigh, frontLow);
if (surAtFront && (len < 3)) {
// nothing to do since surAtFront is allowed and 1 char left
return;
}
- final boolean surAtEnd = allowEndSur
- && Character.isSurrogatePair(endHigh, endLow);
+ final boolean surAtEnd = allowEndSur && Character.isSurrogatePair(endHigh, endLow);
allowFrontSur = allowEndSur = true;
if (surAtFront == surAtEnd) {
if (surAtFront) {
@@ -145,11 +140,10 @@ public final class ReversedWildcardFilter extends TokenFilter {
buffer[end] = allowFrontSur ? endLow : frontHigh;
}
}
-
+
@Override
public void reset() throws IOException {
super.reset();
save = null;
}
-
}
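The public static reverse() helper above keeps surrogate pairs intact and is what the filter applies after appending the marker character; a tiny fragment showing the effect on a plain ASCII term (marker value taken from ReverseStringFilter, everything else illustrative and assuming the usual imports):

    char marker = '\u0001'; // ReverseStringFilter.START_OF_HEADING_MARKER
    char[] buf = {'s', 'o', 'l', 'r', marker};
    ReversedWildcardFilter.reverse(buf, 0, buf.length);
    // buf is now {marker, 'r', 'l', 'o', 's'}: the reversed term prefixed with the marker
    System.out.println(new String(buf, 1, buf.length - 1)); // prints: rlos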
diff --git a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java b/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java
index 4a8f362..d1706d1 100644
--- a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java
+++ b/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java
@@ -15,39 +15,41 @@
* limitations under the License.
*/
package org.apache.solr.analysis;
-import java.util.Map;
+import java.util.Map;
+import org.apache.lucene.analysis.TokenFilterFactory;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
-import org.apache.lucene.analysis.TokenFilterFactory;
/**
- * Factory for {@link ReversedWildcardFilter}-s. When this factory is
- * added to an analysis chain, it will be used both for filtering the
- * tokens during indexing, and to determine the query processing of
- * this field during search.
+ * Factory for {@link ReversedWildcardFilter}-s. When this factory is added to an analysis chain, it
+ * will be used both for filtering the tokens during indexing, and to determine the query processing
+ * of this field during search.
+ *
* <p>This class supports the following init arguments:
+ *
* <ul>
- * <li><code>withOriginal</code> - if true, then produce both original and reversed tokens at
- * the same positions. If false, then produce only reversed tokens.</li>
- * <li><code>maxPosAsterisk</code> - maximum position (1-based) of the asterisk wildcard
- * ('*') that triggers the reversal of query term. Asterisk that occurs at
- * positions higher than this value will not cause the reversal of query term.
- * Defaults to 2, meaning that asterisks on positions 1 and 2 will cause
- * a reversal.</li>
- * <li><code>maxPosQuestion</code> - maximum position (1-based) of the question
- * mark wildcard ('?') that triggers the reversal of query term. Defaults to 1.
- * Set this to 0, and <code>maxPosAsterisk</code> to 1 to reverse only
- * pure suffix queries (i.e. ones with a single leading asterisk).</li>
- * <li><code>maxFractionAsterisk</code> - additional parameter that
- * triggers the reversal if asterisk ('*') position is less than this
- * fraction of the query token length. Defaults to 0.0f (disabled).</li>
- * <li><code>minTrailing</code> - minimum number of trailing characters in query
- * token after the last wildcard character. For good performance this should be
- * set to a value larger than 1. Defaults to 2.
+ * <li><code>withOriginal</code> - if true, then produce both original and reversed tokens at the
+ * same positions. If false, then produce only reversed tokens.
+ * <li><code>maxPosAsterisk</code> - maximum position (1-based) of the asterisk wildcard ('*')
+ * that triggers the reversal of query term. Asterisk that occurs at positions higher than
+ * this value will not cause the reversal of query term. Defaults to 2, meaning that asterisks
+ * on positions 1 and 2 will cause a reversal.
+ * <li><code>maxPosQuestion</code> - maximum position (1-based) of the question mark wildcard
+ * ('?') that triggers the reversal of query term. Defaults to 1. Set this to 0, and <code>
+ * maxPosAsterisk</code> to 1 to reverse only pure suffix queries (i.e. ones with a single
+ * leading asterisk).
+ * <li><code>maxFractionAsterisk</code> - additional parameter that triggers the reversal if
+ * asterisk ('*') position is less than this fraction of the query token length. Defaults to
+ * 0.0f (disabled).
+ * <li><code>minTrailing</code> - minimum number of trailing characters in query token after the
+ * last wildcard character. For good performance this should be set to a value larger than 1.
+ * Defaults to 2.
* </ul>
- * Note 1: This filter always reverses input tokens during indexing.
- * Note 2: Query tokens without wildcard characters will never be reversed.
+ *
+ * Note 1: This filter always reverses input tokens during indexing. Note 2: Query tokens without
+ * wildcard characters will never be reversed.
+ *
* <pre class="prettyprint" >
* <fieldType name="text_rvswc" class="solr.TextField" positionIncrementGap="100">
* <analyzer type="index">
@@ -67,7 +69,7 @@ public class ReversedWildcardFilterFactory extends TokenFilterFactory {
/** SPI name */
public static final String NAME = "reversedWildcard";
-
+
private char markerChar = ReverseStringFilter.START_OF_HEADING_MARKER;
private boolean withOriginal;
private int maxPosAsterisk;
@@ -76,7 +78,7 @@ public class ReversedWildcardFilterFactory extends TokenFilterFactory {
private float maxFractionAsterisk;
/** Creates a new ReversedWildcardFilterFactory */
- public ReversedWildcardFilterFactory(Map<String,String> args) {
+ public ReversedWildcardFilterFactory(Map<String, String> args) {
super(args);
withOriginal = getBoolean(args, "withOriginal", true);
maxPosAsterisk = getInt(args, "maxPosAsterisk", 2);
@@ -97,11 +99,11 @@ public class ReversedWildcardFilterFactory extends TokenFilterFactory {
public TokenStream create(TokenStream input) {
return new ReversedWildcardFilter(input, withOriginal, markerChar);
}
-
+
/**
- * This method encapsulates the logic that determines whether
- * a query token should be reversed in order to use the
- * reversed terms in the index.
+ * This method encapsulates the logic that determines whether a query token should be reversed in
+ * order to use the reversed terms in the index.
+ *
* @param token input token.
* @return true if input token should be reversed, false otherwise.
*/
@@ -125,22 +127,22 @@ public class ReversedWildcardFilterFactory extends TokenFilterFactory {
} else {
pos = posA;
}
- if (len - lastPos < minTrailing) { // too few trailing chars
+ if (len - lastPos < minTrailing) { // too few trailing chars
return false;
}
- if (posQ != -1 && posQ < maxPosQuestion) { // leading '?'
+ if (posQ != -1 && posQ < maxPosQuestion) { // leading '?'
return true;
}
if (posA != -1 && posA < maxPosAsterisk) { // leading '*'
return true;
}
// '*' in the leading part
- if (maxFractionAsterisk > 0.0f && pos < (float)token.length() * maxFractionAsterisk) {
+ if (maxFractionAsterisk > 0.0f && pos < (float) token.length() * maxFractionAsterisk) {
return true;
}
return false;
}
-
+
public char getMarkerChar() {
return markerChar;
}
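Schema XML (as in the prettyprint block above) is the normal way to configure this factory; purely as a hedged illustration of how the init arguments reach the constructor, a fragment assuming the usual imports and a surrounding method body:

    Map<String, String> args = new HashMap<>();
    args.put("withOriginal", "true");   // index original and reversed tokens
    args.put("maxPosAsterisk", "3");
    args.put("maxPosQuestion", "2");
    ReversedWildcardFilterFactory factory = new ReversedWildcardFilterFactory(args);

    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader("apache solr"));
    TokenStream indexTimeStream = factory.create(tokenizer); // wraps with ReversedWildcardFilter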
diff --git a/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java b/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java
index 10c1b7c..c817a3a 100644
--- a/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java
+++ b/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java
@@ -16,12 +16,10 @@
*/
package org.apache.solr.analysis;
-import org.apache.lucene.analysis.Analyzer;
-
import java.io.Reader;
+import org.apache.lucene.analysis.Analyzer;
/**
- *
* @since 3.1
*/
public abstract class SolrAnalyzer extends Analyzer {
diff --git a/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java b/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java
index 611e3cf..535787e 100644
--- a/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java
+++ b/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java
@@ -17,30 +17,29 @@
package org.apache.solr.analysis;
import java.io.Reader;
-
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharFilterFactory;
+import org.apache.lucene.analysis.TokenFilterFactory;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.TokenizerFactory;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.custom.CustomAnalyzer;
-import org.apache.lucene.analysis.CharFilterFactory;
-import org.apache.lucene.analysis.TokenFilterFactory;
-import org.apache.lucene.analysis.TokenizerFactory;
/**
- * An analyzer that uses a tokenizer and a list of token filters to
- * create a TokenStream.
+ * An analyzer that uses a tokenizer and a list of token filters to create a TokenStream.
+ *
+ * <p>It should probably be replaced with {@link CustomAnalyzer}.
*
- * It should probably be replaced with {@link CustomAnalyzer}.
* @since 3.1
*/
public final class TokenizerChain extends SolrAnalyzer {
private static final CharFilterFactory[] EMPTY_CHAR_FITLERS = new CharFilterFactory[0];
private static final TokenFilterFactory[] EMPTY_TOKEN_FITLERS = new TokenFilterFactory[0];
-
- final private CharFilterFactory[] charFilters;
- final private TokenizerFactory tokenizer;
- final private TokenFilterFactory[] filters;
+
+ private final CharFilterFactory[] charFilters;
+ private final TokenizerFactory tokenizer;
+ private final TokenFilterFactory[] filters;
/** Copy from CustomAnalyzer. */
public TokenizerChain(CustomAnalyzer customAnalyzer) {
@@ -52,41 +51,56 @@ public final class TokenizerChain extends SolrAnalyzer {
assert customAnalyzer.getOffsetGap(null) == 1; // note: we don't support setting the offset gap
}
- /**
+ /**
* Creates a new TokenizerChain w/o any CharFilterFactories.
*
* @param tokenizer Factory for the Tokenizer to use, must not be null.
* @param filters Factories for the TokenFilters to use - if null, will be treated as if empty.
*/
public TokenizerChain(TokenizerFactory tokenizer, TokenFilterFactory[] filters) {
- this(null,tokenizer,filters);
+ this(null, tokenizer, filters);
}
- /**
+ /**
* Creates a new TokenizerChain.
*
- * @param charFilters Factories for the CharFilters to use, if any - if null, will be treated as if empty.
+ * @param charFilters Factories for the CharFilters to use, if any - if null, will be treated as
+ * if empty.
* @param tokenizer Factory for the Tokenizer to use, must not be null.
- * @param filters Factories for the TokenFilters to use if any- if null, will be treated as if empty.
+ * @param filters Factories for the TokenFilters to use if any- if null, will be treated as if
+ * empty.
*/
- public TokenizerChain(CharFilterFactory[] charFilters, TokenizerFactory tokenizer, TokenFilterFactory[] filters) {
+ public TokenizerChain(
+ CharFilterFactory[] charFilters, TokenizerFactory tokenizer, TokenFilterFactory[] filters) {
charFilters = null == charFilters ? EMPTY_CHAR_FITLERS : charFilters;
filters = null == filters ? EMPTY_TOKEN_FITLERS : filters;
if (null == tokenizer) {
throw new NullPointerException("TokenizerFactory must not be null");
}
-
+
this.charFilters = charFilters;
this.tokenizer = tokenizer;
this.filters = filters;
}
- /** @return array of CharFilterFactories, may be empty but never null */
- public CharFilterFactory[] getCharFilterFactories() { return charFilters; }
- /** @return the TokenizerFactory in use, will never be null */
- public TokenizerFactory getTokenizerFactory() { return tokenizer; }
- /** @return array of TokenFilterFactories, may be empty but never null */
- public TokenFilterFactory[] getTokenFilterFactories() { return filters; }
+ /**
+ * @return array of CharFilterFactories, may be empty but never null
+ */
+ public CharFilterFactory[] getCharFilterFactories() {
+ return charFilters;
+ }
+ /**
+ * @return the TokenizerFactory in use, will never be null
+ */
+ public TokenizerFactory getTokenizerFactory() {
+ return tokenizer;
+ }
+ /**
+ * @return array of TokenFilterFactories, may be empty but never null
+ */
+ public TokenFilterFactory[] getTokenFilterFactories() {
+ return filters;
+ }
@Override
public Reader initReader(String fieldName, Reader reader) {
@@ -132,12 +146,12 @@ public final class TokenizerChain extends SolrAnalyzer {
@Override
public String toString() {
StringBuilder sb = new StringBuilder("TokenizerChain(");
- for (CharFilterFactory filter: charFilters) {
+ for (CharFilterFactory filter : charFilters) {
sb.append(filter);
sb.append(", ");
}
sb.append(tokenizer);
- for (TokenFilterFactory filter: filters) {
+ for (TokenFilterFactory filter : filters) {
sb.append(", ");
sb.append(filter);
}
@@ -170,5 +184,4 @@ public final class TokenizerChain extends SolrAnalyzer {
}
};
}
-
}
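For orientation, the two constructors documented above compose factories into an Analyzer; a short fragment, where the concrete factories (KeywordTokenizerFactory, LowerCaseFilterFactory) are standard Lucene classes chosen here only for illustration and not taken from this commit:

    TokenizerFactory tokenizer = new KeywordTokenizerFactory(new HashMap<>());
    TokenFilterFactory[] filters = {new LowerCaseFilterFactory(new HashMap<>())};
    TokenizerChain chain = new TokenizerChain(tokenizer, filters); // no CharFilterFactories
    try (TokenStream ts = chain.tokenStream("title", "Some TEXT")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term); // KeywordTokenizer + LowerCaseFilter yields: "some text"
      }
      ts.end();
    }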
diff --git a/solr/core/src/java/org/apache/solr/analysis/package-info.java b/solr/core/src/java/org/apache/solr/analysis/package-info.java
index e8a48bf..2a9e652 100644
--- a/solr/core/src/java/org/apache/solr/analysis/package-info.java
+++ b/solr/core/src/java/org/apache/solr/analysis/package-info.java
@@ -14,13 +14,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
-/**
- * Factories and classes specific to text analysis and the creation of {@link org.apache.lucene.analysis.TokenStream}s
- * <p>
- * See {@link org.apache.lucene.analysis} for additional details.
+
+/**
+ * Factories and classes specific to text analysis and the creation of {@link
+ * org.apache.lucene.analysis.TokenStream}s
+ *
+ * <p>See {@link org.apache.lucene.analysis} for additional details.
*/
package org.apache.solr.analysis;
-
-
-
diff --git a/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java b/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java
index 2aa65fe..905157c 100644
--- a/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java
+++ b/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java
@@ -17,7 +17,9 @@
package org.apache.solr.api;
-
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.MapperFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandle;
@@ -32,10 +34,6 @@ import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.MapperFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SpecProvider;
@@ -54,24 +52,22 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * This class implements an Api just from an annotated java class
- * The class must have an annotation {@link EndPoint}
- * Each method must have an annotation {@link Command}
- * The methods that implement a command should have the first 2 parameters
- * {@link SolrQueryRequest} and {@link SolrQueryResponse} or it may optionally
- * have a third parameter which could be a java class annotated with jackson annotations.
- * The third parameter is only valid if it is using a json command payload
+ * This class implements an Api just from an annotated java class The class must have an annotation
+ * {@link EndPoint} Each method must have an annotation {@link Command} The methods that implement a
+ * command should have the first 2 parameters {@link SolrQueryRequest} and {@link SolrQueryResponse}
+ * or it may optionally have a third parameter which could be a java class annotated with jackson
+ * annotations. The third parameter is only valid if it is using a json command payload
*/
-
-public class AnnotatedApi extends Api implements PermissionNameProvider , Closeable {
+public class AnnotatedApi extends Api implements PermissionNameProvider, Closeable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- private static final ObjectMapper mapper = SolrJacksonAnnotationInspector.createObjectMapper()
+ private static final ObjectMapper mapper =
+ SolrJacksonAnnotationInspector.createObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.disable(MapperFeature.AUTO_DETECT_FIELDS);
public static final String ERR = "Error executing commands :";
private EndPoint endPoint;
- private final Map<String, Cmd> commands ;
+ private final Map<String, Cmd> commands;
private final Cmd singletonCommand;
private final Api fallback;
@@ -81,16 +77,17 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
if (value.obj instanceof Closeable) {
((Closeable) value.obj).close();
}
- break;// all objects are same so close only one
+ break; // all objects are same so close only one
}
-
}
public EndPoint getEndPoint() {
return endPoint;
}
- public Map<String, Cmd> getCommands() { return commands; }
+ public Map<String, Cmd> getCommands() {
+ return commands;
+ }
public static List<Api> getApis(Object obj) {
return getApis(obj.getClass(), obj, true);
@@ -98,18 +95,20 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
/**
* Get a list of Api-s supported by this class.
+ *
* @param theClass class
* @param obj object of this class (may be null)
* @param allowEmpty if false then an exception is thrown if no Api-s can be retrieved, if true
- * then absence of Api-s is silently ignored.
+ * then absence of Api-s is silently ignored.
* @return list of discovered Api-s
*/
- public static List<Api> getApis(Class<?> theClass , Object obj, boolean allowEmpty) {
+ public static List<Api> getApis(Class<?> theClass, Object obj, boolean allowEmpty) {
Class<?> klas = null;
try {
klas = MethodHandles.publicLookup().accessClass(theClass);
} catch (IllegalAccessException e) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Method may be non-public/inaccessible", e);
+ throw new SolrException(
+ SolrException.ErrorCode.SERVER_ERROR, "Method may be non-public/inaccessible", e);
}
if (klas.isAnnotationPresent(EndPoint.class)) {
EndPoint endPoint = klas.getAnnotation(EndPoint.class);
@@ -147,7 +146,8 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
}
}
- protected AnnotatedApi(SpecProvider specProvider, EndPoint endPoint, Map<String, Cmd> commands, Api fallback) {
+ protected AnnotatedApi(
+ SpecProvider specProvider, EndPoint endPoint, Map<String, Cmd> commands, Api fallback) {
super(specProvider);
this.endPoint = endPoint;
this.fallback = fallback;
@@ -168,7 +168,9 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
methods.add(method.name());
}
map.put("methods", methods);
- map.put("url", new ValidatingJsonMap(Collections.singletonMap("paths", Arrays.asList(endPoint.path()))));
+ map.put(
+ "url",
+ new ValidatingJsonMap(Collections.singletonMap("paths", Arrays.asList(endPoint.path()))));
Map<String, Object> cmds = new HashMap<>();
for (Method method : m) {
@@ -182,10 +184,8 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
}
return new ValidatingJsonMap(map);
};
-
}
-
@Override
public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
if (singletonCommand != null) {
@@ -206,7 +206,9 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
fallback.call(req, rsp);
return;
} else {
- throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST, "Error processing commands",
+ throw new ApiBag.ExceptionWithErrObject(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Error processing commands",
CommandOperation.captureErrors(cmds));
}
}
@@ -221,7 +223,6 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
log.error("{}{}", ERR, Utils.toJSONString(errs));
throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST, ERR, errs);
}
-
}
static class Cmd {
@@ -233,7 +234,6 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
Class<?> parameterClass;
boolean isWrappedInPayloadObj = false;
-
Cmd(String command, Object obj, Method method) {
this.command = command;
this.obj = obj;
@@ -247,7 +247,8 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
if (parameterTypes.length == 1) {
readPayloadType(method.getGenericParameterTypes()[0]);
} else if (parameterTypes.length == 3) {
- if (parameterTypes[0] != SolrQueryRequest.class || parameterTypes[1] != SolrQueryResponse.class) {
+ if (parameterTypes[0] != SolrQueryRequest.class
+ || parameterTypes[1] != SolrQueryResponse.class) {
throw new RuntimeException("Invalid params for method " + method);
}
Type t = method.getGenericParameterTypes()[2];
@@ -263,8 +264,8 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
ParameterizedType typ = (ParameterizedType) t;
if (typ.getRawType() == PayloadObj.class) {
isWrappedInPayloadObj = true;
- if(typ.getActualTypeArguments().length == 0){
- //this is a raw type
+ if (typ.getActualTypeArguments().length == 0) {
+ // this is a raw type
parameterClass = Map.class;
return;
}
@@ -281,15 +282,14 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
}
}
-
void invoke(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation cmd) {
Object original = null;
try {
Object o = null;
String commandName = null;
- if(paramsCount == 1) {
- if(cmd == null) {
- if(parameterClass != null) {
+ if (paramsCount == 1) {
+ if (cmd == null) {
+ if (parameterClass != null) {
try {
ContentStream stream = req.getContentStreams().iterator().next();
o = mapper.readValue(stream.getStream(), parameterClass);
@@ -317,7 +317,8 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
o = mapper.readValue(Utils.toJSONString(o), parameterClass);
}
if (isWrappedInPayloadObj) {
- PayloadObj<Object> payloadObj = new PayloadObj<>(cmd.name, cmd.getCommandData(), o, req, rsp);
+ PayloadObj<Object> payloadObj =
+ new PayloadObj<>(cmd.name, cmd.getCommandData(), o, req, rsp);
cmd = payloadObj;
method.invoke(obj, req, rsp, payloadObj);
} else {
@@ -332,12 +333,13 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
log.error("Error executing command : ", e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
-
}
private void checkForErrorInPayload(CommandOperation cmd) {
if (cmd.hasError()) {
- throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST, "Error executing command",
+ throw new ApiBag.ExceptionWithErrObject(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Error executing command",
CommandOperation.captureErrors(Collections.singletonList(cmd)));
}
}
@@ -346,8 +348,9 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
public static Map<String, Object> createSchema(Method m) {
Type[] types = m.getGenericParameterTypes();
Type t = null;
- if (types.length == 3) t = types[2]; // (SolrQueryRequest req, SolrQueryResponse rsp, PayloadObj<PluginMeta>)
- if(types.length == 1) t = types[0];// (PayloadObj<PluginMeta>)
+ if (types.length == 3)
+ t = types[2]; // (SolrQueryRequest req, SolrQueryResponse rsp, PayloadObj<PluginMeta>)
+ if (types.length == 1) t = types[0]; // (PayloadObj<PluginMeta>)
if (t != null) {
if (t instanceof ParameterizedType) {
ParameterizedType typ = (ParameterizedType) t;
@@ -356,9 +359,7 @@ public class AnnotatedApi extends Api implements PermissionNameProvider , Closea
}
}
return JsonSchemaCreator.getSchema(t);
-
}
return null;
}
-
}
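For illustration, a minimal sketch of the pattern this file supports: an @EndPoint POJO whose @Command methods are turned into v2 Api instances by AnnotatedApi.getApis (see the javadoc reformatted above). The class, path, and command names below are hypothetical and not taken from this commit.

import java.util.List;
import java.util.Map;
import org.apache.solr.api.AnnotatedApi;
import org.apache.solr.api.Api;
import org.apache.solr.api.Command;
import org.apache.solr.api.EndPoint;
import org.apache.solr.api.PayloadObj;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.security.PermissionNameProvider;

// Hypothetical plugin: one POST endpoint with a single "echo" command.
@EndPoint(
    method = SolrRequest.METHOD.POST,
    path = "/plugin/demo",
    permission = PermissionNameProvider.Name.ALL)
public class DemoEndpoint {

  @Command(name = "echo")
  public void echo(PayloadObj<Map<String, Object>> payload) {
    // The deserialized command body is available via get();
    // the response is reachable through the PayloadObj as well.
    payload.getResponse().add("echoed", payload.get());
  }

  public static List<Api> discover() {
    // getApis(Object) wraps the annotated methods into AnnotatedApi instances.
    return AnnotatedApi.getApis(new DemoEndpoint());
  }
}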
diff --git a/solr/core/src/java/org/apache/solr/api/Api.java b/solr/core/src/java/org/apache/solr/api/Api.java
index d2c468c..91389e7 100644
--- a/solr/core/src/java/org/apache/solr/api/Api.java
+++ b/solr/core/src/java/org/apache/solr/api/Api.java
@@ -17,18 +17,17 @@
package org.apache.solr.api;
-import java.util.Map;
-
import com.google.common.collect.ImmutableMap;
+import java.util.Map;
import org.apache.solr.common.SpecProvider;
+import org.apache.solr.common.util.JsonSchemaValidator;
import org.apache.solr.common.util.ValidatingJsonMap;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.common.util.JsonSchemaValidator;
-/** Every version 2 API must extend the this class. It's mostly like a request handler
- * but it has extra methods to provide the json schema of the end point
- *
+/**
+ * Every version 2 API must extend the this class. It's mostly like a request handler but it has
+ * extra methods to provide the json schema of the end point
*/
public abstract class Api implements SpecProvider {
protected SpecProvider spec;
@@ -38,31 +37,28 @@ public abstract class Api implements SpecProvider {
this.spec = spec;
}
- /**This method helps to cache the schema validator object
- */
+ /** This method helps to cache the schema validator object */
public Map<String, JsonSchemaValidator> getCommandSchema() {
if (commandSchema == null) {
synchronized (this) {
- if(commandSchema == null) {
+ if (commandSchema == null) {
ValidatingJsonMap commands = getSpec().getMap("commands", null);
- commandSchema = commands != null ?
- ImmutableMap.copyOf(ApiBag.getParsedSchema(commands)) :
- ImmutableMap.of();
+ commandSchema =
+ commands != null
+ ? ImmutableMap.copyOf(ApiBag.getParsedSchema(commands))
+ : ImmutableMap.of();
}
}
}
return commandSchema;
}
- /** The method that gets called for each request
- */
- public abstract void call(SolrQueryRequest req , SolrQueryResponse rsp);
+ /** The method that gets called for each request */
+ public abstract void call(SolrQueryRequest req, SolrQueryResponse rsp);
- /**Get the specification of the API as a Map
- */
+ /** Get the specification of the API as a Map */
@Override
public ValidatingJsonMap getSpec() {
return spec.getSpec();
}
-
}
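The javadoc reformatted above says every v2 API extends Api and implements call(). A minimal hand-written subclass might look like the sketch below (hypothetical name; it uses the empty spec provider that ApiBag exposes, so getCommandSchema() stays empty).

import org.apache.solr.api.Api;
import org.apache.solr.api.ApiBag;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;

// Hypothetical: an Api with no command schema that just reports a status value.
public class StatusApi extends Api {

  public StatusApi() {
    super(ApiBag.EMPTY_SPEC); // no spec, so getCommandSchema() will be empty
  }

  @Override
  public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
    rsp.add("status", "ok");
  }
}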
diff --git a/solr/core/src/java/org/apache/solr/api/ApiBag.java b/solr/core/src/java/org/apache/solr/api/ApiBag.java
index 0e495bd..5a790e9 100644
--- a/solr/core/src/java/org/apache/solr/api/ApiBag.java
+++ b/solr/core/src/java/org/apache/solr/api/ApiBag.java
@@ -17,6 +17,15 @@
package org.apache.solr.api;
+import static org.apache.solr.client.solrj.SolrRequest.SUPPORTED_METHODS;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.StrUtils.formatString;
+import static org.apache.solr.common.util.ValidatingJsonMap.ENUM_OF;
+import static org.apache.solr.common.util.ValidatingJsonMap.NOT_NULL;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
@@ -29,10 +38,6 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SpecProvider;
@@ -53,12 +58,6 @@ import org.apache.solr.security.PermissionNameProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.client.solrj.SolrRequest.SUPPORTED_METHODS;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.StrUtils.formatString;
-import static org.apache.solr.common.util.ValidatingJsonMap.ENUM_OF;
-import static org.apache.solr.common.util.ValidatingJsonMap.NOT_NULL;
-
public class ApiBag {
private final boolean isCoreSpecific;
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -69,7 +68,9 @@ public class ApiBag {
this.isCoreSpecific = isCoreSpecific;
}
- /**Register a POJO annotated with {@link EndPoint}
+ /**
+ * Register a POJO annotated with {@link EndPoint}
+ *
* @param o the instance to be used for invocations
*/
public synchronized List<Api> registerObject(Object o) {
@@ -79,28 +80,34 @@ public class ApiBag {
}
return l;
}
+
public synchronized void register(Api api) {
register(api, Collections.emptyMap());
}
+
public synchronized void register(Api api, Map<String, String> nameSubstitutes) {
try {
validateAndRegister(api, nameSubstitutes);
} catch (Exception e) {
- log.error("Unable to register plugin: {} with spec {} :", api.getClass().getName(), Utils.toJSONString(api.getSpec()), e);
+ log.error(
+ "Unable to register plugin: {} with spec {} :",
+ api.getClass().getName(),
+ Utils.toJSONString(api.getSpec()),
+ e);
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
-
}
}
/**
- * PathTrie extension that combines the commands in the API being registered with any that have already been registered.
+ * PathTrie extension that combines the commands in the API being registered with any that have
+ * already been registered.
*
- * This is only possible currently for AnnotatedApis. All other Api implementations will resort to the default
- * "overwriting" behavior of PathTrie
+ * <p>This is only possible currently for AnnotatedApis. All other Api implementations will resort
+ * to the default "overwriting" behavior of PathTrie
*/
class CommandAggregatingPathTrie extends PathTrie<Api> {
@@ -115,8 +122,8 @@ public class ApiBag {
return;
}
- // If 'o' and 'node.obj' aren't both AnnotatedApi's then we can't aggregate the commands, so fallback to the
- // default behavior
+ // If 'o' and 'node.obj' aren't both AnnotatedApi's then we can't aggregate the commands, so
+ // fallback to the default behavior
if ((!(o instanceof AnnotatedApi)) || (!(node.getObject() instanceof AnnotatedApi))) {
super.attachValueToNode(node, o);
return;
@@ -125,10 +132,12 @@ public class ApiBag {
final AnnotatedApi beingRegistered = (AnnotatedApi) o;
final AnnotatedApi alreadyRegistered = (AnnotatedApi) node.getObject();
if (alreadyRegistered instanceof CommandAggregatingAnnotatedApi) {
- final CommandAggregatingAnnotatedApi alreadyRegisteredAsCollapsing = (CommandAggregatingAnnotatedApi) alreadyRegistered;
+ final CommandAggregatingAnnotatedApi alreadyRegisteredAsCollapsing =
+ (CommandAggregatingAnnotatedApi) alreadyRegistered;
alreadyRegisteredAsCollapsing.combineWith(beingRegistered);
} else {
- final CommandAggregatingAnnotatedApi wrapperApi = new CommandAggregatingAnnotatedApi(alreadyRegistered);
+ final CommandAggregatingAnnotatedApi wrapperApi =
+ new CommandAggregatingAnnotatedApi(alreadyRegistered);
wrapperApi.combineWith(beingRegistered);
node.setObject(wrapperApi);
}
@@ -173,7 +182,8 @@ public class ApiBag {
for (String method : methods) {
PathTrie<Api> registry = apis.get(method);
- if (registry == null) apis.put(method, registry = new CommandAggregatingPathTrie(ImmutableSet.of("_introspect")));
+ if (registry == null)
+ apis.put(method, registry = new CommandAggregatingPathTrie(ImmutableSet.of("_introspect")));
ValidatingJsonMap url = spec.getMap("url", NOT_NULL);
ValidatingJsonMap params = url.getMap("params", null);
if (params != null) {
@@ -190,7 +200,8 @@ public class ApiBag {
if (!wildCardNames.contains(o.toString()))
throw new RuntimeException("" + o + " is not a valid part name");
ValidatingJsonMap pathMeta = parts.getMap(o.toString(), NOT_NULL);
- pathMeta.get("type", ENUM_OF, ImmutableSet.of("enum", "string", "int", "number", "boolean"));
+ pathMeta.get(
+ "type", ENUM_OF, ImmutableSet.of("enum", "string", "int", "number", "boolean"));
}
}
verifyCommands(api.getSpec());
@@ -201,7 +212,8 @@ public class ApiBag {
}
}
- public static void registerIntrospect(Map<String, String> nameSubstitutes, PathTrie<Api> registry, String path, Api introspect) {
+ public static void registerIntrospect(
+ Map<String, String> nameSubstitutes, PathTrie<Api> registry, String path, Api introspect) {
List<String> l = PathTrie.getPathSegments(path);
registerIntrospect(l, registry, nameSubstitutes, introspect);
int lastIdx = l.size() - 1;
@@ -214,7 +226,8 @@ public class ApiBag {
}
}
- static void registerIntrospect(List<String> l, PathTrie<Api> registry, Map<String, String> substitutes, Api introspect) {
+ static void registerIntrospect(
+ List<String> l, PathTrie<Api> registry, Map<String, String> substitutes, Api introspect) {
ArrayList<String> copy = new ArrayList<>(l);
copy.add("_introspect");
registry.insert(copy, substitutes, introspect);
@@ -244,7 +257,10 @@ public class ApiBag {
String cmd = req.getParams().get("command");
ValidatingJsonMap result = null;
if (cmd == null) {
- result = isCoreSpecific ? ValidatingJsonMap.getDeepCopy(baseApi.getSpec(), 5, true) : baseApi.getSpec();
+ result =
+ isCoreSpecific
+ ? ValidatingJsonMap.getDeepCopy(baseApi.getSpec(), 5, true)
+ : baseApi.getSpec();
} else {
ValidatingJsonMap specCopy = ValidatingJsonMap.getDeepCopy(baseApi.getSpec(), 5, true);
ValidatingJsonMap commands = specCopy.getMap("commands", null);
@@ -255,19 +271,20 @@ public class ApiBag {
} else {
specCopy.put("commands", Collections.singletonMap(cmd, m));
}
-
}
result = specCopy;
}
if (isCoreSpecific) {
- List<String> pieces = req.getHttpSolrCall() == null ? null : ((V2HttpCall) req.getHttpSolrCall()).getPathSegments();
+ List<String> pieces =
+ req.getHttpSolrCall() == null
+ ? null
+ : ((V2HttpCall) req.getHttpSolrCall()).getPathSegments();
if (pieces != null) {
String prefix = "/" + pieces.get(0) + "/" + pieces.get(1);
List<String> paths = result.getMap("url", NOT_NULL).getList("paths", NOT_NULL);
- result.getMap("url", NOT_NULL).put("paths",
- paths.stream()
- .map(s -> prefix + s)
- .collect(Collectors.toList()));
+ result
+ .getMap("url", NOT_NULL)
+ .put("paths", paths.stream().map(s -> prefix + s).collect(Collectors.toList()));
}
}
List<Object> l = (List<Object>) rsp.getValues().get("spec");
@@ -280,7 +297,7 @@ public class ApiBag {
Map<String, JsonSchemaValidator> validators = new HashMap<>();
for (Object o : commands.entrySet()) {
@SuppressWarnings("unchecked")
- Map.Entry<String, Map<?,?>> cmd = (Map.Entry<String, Map<?,?>>) o;
+ Map.Entry<String, Map<?, ?>> cmd = (Map.Entry<String, Map<?, ?>>) o;
try {
validators.put(cmd.getKey(), new JsonSchemaValidator(cmd.getValue()));
} catch (Exception e) {
@@ -290,12 +307,10 @@ public class ApiBag {
return validators;
}
-
private void verifyCommands(ValidatingJsonMap spec) {
ValidatingJsonMap commands = spec.getMap("commands", null);
if (commands == null) return;
getParsedSchema(commands);
-
}
private Set<String> getWildCardNames(List<String> paths) {
@@ -310,7 +325,6 @@ public class ApiBag {
return wildCardNames;
}
-
public Api lookup(String path, String httpMethod, Map<String, String> parts) {
if (httpMethod == null) {
for (PathTrie<Api> trie : apis.values()) {
@@ -353,10 +367,10 @@ public class ApiBag {
return b.build();
}
-
public static final SpecProvider EMPTY_SPEC = () -> ValidatingJsonMap.EMPTY;
public static final String HANDLER_NAME = "handlerName";
- public static final Set<String> KNOWN_TYPES = ImmutableSet.of("string", "boolean", "list", "int", "double", "object");
+ public static final Set<String> KNOWN_TYPES =
+ ImmutableSet.of("string", "boolean", "list", "int", "double", "object");
public PathTrie<Api> getRegistry(String method) {
return apis.get(method);
@@ -365,31 +379,36 @@ public class ApiBag {
public void registerLazy(PluginBag.PluginHolder<SolrRequestHandler> holder, PluginInfo info) {
String specName = info.attributes.get("spec");
if (specName == null) specName = "emptySpec";
- register(new LazyLoadedApi(Utils.getSpec(specName), holder), Collections.singletonMap(HANDLER_NAME, info.attributes.get(NAME)));
+ register(
+ new LazyLoadedApi(Utils.getSpec(specName), holder),
+ Collections.singletonMap(HANDLER_NAME, info.attributes.get(NAME)));
}
public static SpecProvider constructSpec(PluginInfo info) {
Object specObj = info == null ? null : info.attributes.get("spec");
if (specObj == null) specObj = "emptySpec";
if (specObj instanceof Map) {
- // Value from Map<String,String> can be a Map because in PluginInfo(String, Map) we assign a Map<String, Object>
+ // Value from Map<String,String> can be a Map because in PluginInfo(String, Map) we assign a
+ // Map<String, Object>
// assert false : "got a map when this should only be Strings";
- Map<?,?> map = (Map<?,?>) specObj;
+ Map<?, ?> map = (Map<?, ?>) specObj;
return () -> ValidatingJsonMap.getDeepCopy(map, 4, false);
} else {
return Utils.getSpec((String) specObj);
}
}
- public static List<CommandOperation> getCommandOperations(ContentStream stream, Map<String, JsonSchemaValidator> validators, boolean validate) {
+ public static List<CommandOperation> getCommandOperations(
+ ContentStream stream, Map<String, JsonSchemaValidator> validators, boolean validate) {
List<CommandOperation> parsedCommands = null;
try {
- parsedCommands = CommandOperation.readCommands(Collections.singleton(stream), new NamedList<>());
+ parsedCommands =
+ CommandOperation.readCommands(Collections.singleton(stream), new NamedList<>());
} catch (IOException e) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to parse commands",e);
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to parse commands", e);
}
- if (validators == null || !validate) { // no validation possible because we do not have a spec
+ if (validators == null || !validate) { // no validation possible because we do not have a spec
return parsedCommands;
}
@@ -398,23 +417,25 @@ public class ApiBag {
for (CommandOperation cmd : commandsCopy) {
JsonSchemaValidator validator = validators.get(cmd.name);
if (validator == null) {
- cmd.addError(formatString("Unknown operation ''{0}'' available ops are ''{1}''", cmd.name,
- validators.keySet()));
+ cmd.addError(
+ formatString(
+ "Unknown operation ''{0}'' available ops are ''{1}''",
+ cmd.name, validators.keySet()));
continue;
} else {
List<String> errs = validator.validateJson(cmd.getCommandData());
- if (errs != null){
+ if (errs != null) {
// otherwise swallowed in solrj tests, and just get "Error in command payload" in test log
// which is quite unhelpful.
- log.error("Command errors for {}:{}", cmd.name, errs );
+ log.error("Command errors for {}:{}", cmd.name, errs);
for (String err : errs) cmd.addError(err);
}
}
-
}
List<Map<String, Object>> errs = CommandOperation.captureErrors(commandsCopy);
if (!errs.isEmpty()) {
- throw new ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST, "Error in command payload", errs);
+ throw new ExceptionWithErrObject(
+ SolrException.ErrorCode.BAD_REQUEST, "Error in command payload", errs);
}
return commandsCopy;
}
@@ -442,7 +463,8 @@ public class ApiBag {
private final PluginBag.PluginHolder<SolrRequestHandler> holder;
private Api delegate;
- protected LazyLoadedApi(SpecProvider specProvider, PluginBag.PluginHolder<SolrRequestHandler> lazyPluginHolder) {
+ protected LazyLoadedApi(
+ SpecProvider specProvider, PluginBag.PluginHolder<SolrRequestHandler> lazyPluginHolder) {
super(specProvider);
this.holder = lazyPluginHolder;
}
@@ -455,5 +477,4 @@ public class ApiBag {
delegate.call(req, rsp);
}
}
-
}
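registerObject and lookup, both touched above, are the two sides of the ApiBag contract: annotated objects go in, and a path plus HTTP method resolves back to an Api. A small hypothetical wiring sketch, reusing the DemoEndpoint class sketched earlier:

import java.util.HashMap;
import java.util.Map;
import org.apache.solr.api.Api;
import org.apache.solr.api.ApiBag;

public class ApiBagSketch {
  public static void main(String[] args) {
    ApiBag bag = new ApiBag(false); // not core-specific

    // DemoEndpoint is the hypothetical @EndPoint POJO sketched earlier.
    bag.registerObject(new DemoEndpoint());

    // Resolve a concrete request; wildcard path parts, if any,
    // are captured into the supplied map.
    Map<String, String> parts = new HashMap<>();
    Api api = bag.lookup("/plugin/demo", "POST", parts);
    System.out.println(api != null ? "resolved" : "not registered");
  }
}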
diff --git a/solr/core/src/java/org/apache/solr/api/ApiSupport.java b/solr/core/src/java/org/apache/solr/api/ApiSupport.java
index ca1e866..7be5f68 100644
--- a/solr/core/src/java/org/apache/solr/api/ApiSupport.java
+++ b/solr/core/src/java/org/apache/solr/api/ApiSupport.java
@@ -19,28 +19,23 @@ package org.apache.solr.api;
import java.util.Collection;
-/**The interface that is implemented by a request handler to support the V2 end point
- *
- */
+/** The interface that is implemented by a request handler to support the V2 end point */
public interface ApiSupport {
- /**It is possible to support multiple v2 apis by a single requesthandler
+ /**
+ * It is possible to support multiple v2 apis by a single requesthandler
*
* @return the list of v2 api implementations
*/
Collection<Api> getApis();
- /**Whether this should be made available at the regular legacy path
- */
+ /** Whether this should be made available at the regular legacy path */
default Boolean registerV1() {
return Boolean.TRUE;
}
- /**Whether this request handler must be made available at the /v2/ path
- */
+ /** Whether this request handler must be made available at the /v2/ path */
default Boolean registerV2() {
return Boolean.FALSE;
}
-
-
}
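ApiSupport is how a request handler advertises its v2 Apis and opts in or out of the v1 and /v2/ paths. A sketch of just that side (hypothetical; a real handler would also implement SolrRequestHandler):

import java.util.Collection;
import org.apache.solr.api.AnnotatedApi;
import org.apache.solr.api.Api;
import org.apache.solr.api.ApiSupport;

// Hypothetical: expose the v2 endpoints of the DemoEndpoint sketched earlier
// and opt out of the legacy v1 path.
public class DemoApiSupport implements ApiSupport {

  @Override
  public Collection<Api> getApis() {
    return AnnotatedApi.getApis(new DemoEndpoint());
  }

  @Override
  public Boolean registerV1() {
    return Boolean.FALSE; // v2 only
  }

  @Override
  public Boolean registerV2() {
    return Boolean.TRUE;
  }
}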
diff --git a/solr/core/src/java/org/apache/solr/api/Command.java b/solr/core/src/java/org/apache/solr/api/Command.java
index 25de077..61aca7c 100644
--- a/solr/core/src/java/org/apache/solr/api/Command.java
+++ b/solr/core/src/java/org/apache/solr/api/Command.java
@@ -25,11 +25,9 @@ import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface Command {
- /**if this is not a json command , leave it empty.
- * Keep in mind that you cannot have duplicates.
+ /**
+ * if this is not a json command , leave it empty. Keep in mind that you cannot have duplicates.
* Only one method per name
- *
*/
String name() default "";
-
}
diff --git a/solr/core/src/java/org/apache/solr/api/ConfigurablePlugin.java b/solr/core/src/java/org/apache/solr/api/ConfigurablePlugin.java
index 0d9a183..6ce06c2 100644
--- a/solr/core/src/java/org/apache/solr/api/ConfigurablePlugin.java
+++ b/solr/core/src/java/org/apache/solr/api/ConfigurablePlugin.java
@@ -19,14 +19,16 @@ package org.apache.solr.api;
import org.apache.solr.common.MapWriter;
-/**Implement this interface if your plugin needs to accept some configuration
- *
+/**
+ * Implement this interface if your plugin needs to accept some configuration
+ *
* @param <T> the configuration Object type
*/
public interface ConfigurablePlugin<T extends MapWriter> {
/**
* This is invoked soon after the Object is initialized.
+ *
* @param cfg value deserialized from JSON
*/
void configure(T cfg);
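ConfigurablePlugin lets a container plugin receive a typed config object soon after construction. A minimal sketch with hypothetical names; the config type must be a MapWriter so it can be written back out, and in Solr itself such fields are normally annotated for Jackson binding as well:

import java.io.IOException;
import org.apache.solr.api.ConfigurablePlugin;
import org.apache.solr.common.MapWriter;

// Hypothetical config bean. In Solr itself such fields usually carry a Jackson
// property annotation so the plugin registry's ObjectMapper can bind them.
public class DemoConfig implements MapWriter {
  public int batchSize = 10;

  @Override
  public void writeMap(EntryWriter ew) throws IOException {
    ew.put("batchSize", batchSize);
  }
}

// Hypothetical plugin: configure(...) is called soon after construction
// with the deserialized "config" section of the plugin definition.
class DemoPlugin implements ConfigurablePlugin<DemoConfig> {
  private DemoConfig cfg;

  @Override
  public void configure(DemoConfig cfg) {
    this.cfg = cfg;
  }
}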
diff --git a/solr/core/src/java/org/apache/solr/api/ContainerPluginsRegistry.java b/solr/core/src/java/org/apache/solr/api/ContainerPluginsRegistry.java
index 7724d77..74affd6 100644
--- a/solr/core/src/java/org/apache/solr/api/ContainerPluginsRegistry.java
+++ b/solr/core/src/java/org/apache/solr/api/ContainerPluginsRegistry.java
@@ -17,6 +17,12 @@
package org.apache.solr.api;
+import static org.apache.lucene.util.IOUtils.closeWhileHandlingException;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.MapperFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.annotations.VisibleForTesting;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
@@ -28,11 +34,6 @@ import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Phaser;
import java.util.function.Supplier;
-
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.MapperFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.lucene.util.ResourceLoaderAware;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.request.beans.PluginMeta;
@@ -55,22 +56,21 @@ import org.apache.solr.util.SolrJacksonAnnotationInspector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.lucene.util.IOUtils.closeWhileHandlingException;
-
/**
- * This class manages the container-level plugins and their Api-s. It is
- * responsible for adding / removing / replacing the plugins according to the updated
- * configuration obtained from {@link ContainerPluginsApi#plugins(Supplier)}.
- * <p>Plugins instantiated by this class may implement zero or more {@link Api}-s, which
- * are then registered in the CoreContainer {@link ApiBag}. They may be also post-processed
- * for additional functionality by {@link PluginRegistryListener}-s registered with
- * this class.</p>
+ * This class manages the container-level plugins and their Api-s. It is responsible for adding /
+ * removing / replacing the plugins according to the updated configuration obtained from {@link
+ * ContainerPluginsApi#plugins(Supplier)}.
+ *
+ * <p>Plugins instantiated by this class may implement zero or more {@link Api}-s, which are then
+ * registered in the CoreContainer {@link ApiBag}. They may be also post-processed for additional
+ * functionality by {@link PluginRegistryListener}-s registered with this class.
*/
public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapWriter, Closeable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- private static final ObjectMapper mapper = SolrJacksonAnnotationInspector.createObjectMapper()
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
+ private static final ObjectMapper mapper =
+ SolrJacksonAnnotationInspector.createObjectMapper()
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.disable(MapperFeature.AUTO_DETECT_FIELDS);
private final List<PluginRegistryListener> listeners = new CopyOnWriteArrayList<>();
@@ -88,16 +88,16 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
Phaser localPhaser = phaser; // volatile read
if (localPhaser != null) {
assert localPhaser.getRegisteredParties() == 1;
- localPhaser.arrive(); // we should be the only ones registered, so this will advance phase each time
+ // we should be the only ones registered, so this will advance phase each time
+ localPhaser.arrive();
}
return false;
}
/**
- * A phaser that will advance phases every time {@link #onChange(Map)} is called.
- * Useful for allowing tests to know when a new configuration is finished getting set.
+ * A phaser that will advance phases every time {@link #onChange(Map)} is called. Useful for
+ * allowing tests to know when a new configuration is finished getting set.
*/
-
@VisibleForTesting
public void setPhaser(Phaser phaser) {
phaser.register();
@@ -107,6 +107,7 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
public void registerListener(PluginRegistryListener listener) {
listeners.add(listener);
}
+
public void unregisterListener(PluginRegistryListener listener) {
listeners.remove(listener);
}
@@ -123,11 +124,14 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
@Override
public synchronized void close() throws IOException {
- currentPlugins.values().forEach(apiInfo -> {
- if (apiInfo.instance instanceof Closeable) {
- IOUtils.closeQuietly((Closeable) apiInfo.instance);
- }
- });
+ currentPlugins
+ .values()
+ .forEach(
+ apiInfo -> {
+ if (apiInfo.instance instanceof Closeable) {
+ IOUtils.closeQuietly((Closeable) apiInfo.instance);
+ }
+ });
}
public synchronized ApiInfo getPlugin(String name) {
@@ -147,7 +151,7 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
public boolean equals(Object obj) {
if (obj instanceof PluginMetaHolder) {
PluginMetaHolder that = (PluginMetaHolder) obj;
- return Objects.equals(this.original,that.original);
+ return Objects.equals(this.original, that.original);
}
return false;
}
@@ -157,6 +161,7 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
return original.hashCode();
}
}
+
@SuppressWarnings("unchecked")
public synchronized void refresh() {
Map<String, Object> pluginInfos;
@@ -166,10 +171,10 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
log.error("Could not read plugins data", e);
return;
}
- Map<String,PluginMetaHolder> newState = new HashMap<>(pluginInfos.size());
+ Map<String, PluginMetaHolder> newState = new HashMap<>(pluginInfos.size());
for (Map.Entry<String, Object> e : pluginInfos.entrySet()) {
try {
- newState.put(e.getKey(),new PluginMetaHolder((Map<String, Object>) e.getValue()));
+ newState.put(e.getKey(), new PluginMetaHolder((Map<String, Object>) e.getValue()));
} catch (Exception exp) {
log.error("Invalid apiInfo configuration :", exp);
}
@@ -180,7 +185,7 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
currentState.put(e.getKey(), e.getValue().holder);
}
Map<String, Diff> diff = compareMaps(currentState, newState);
- if (diff == null) return;//nothing has changed
+ if (diff == null) return; // nothing has changed
for (Map.Entry<String, Diff> e : diff.entrySet()) {
if (e.getValue() == Diff.UNCHANGED) continue;
if (e.getValue() == Diff.REMOVED) {
@@ -188,17 +193,19 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
if (apiInfo == null) continue;
listeners.forEach(listener -> listener.deleted(apiInfo));
for (ApiHolder holder : apiInfo.holders) {
- Api old = containerApiBag.unregister(holder.api.getEndPoint().method()[0],
- getActualPath(apiInfo, holder.api.getEndPoint().path()[0]));
+ Api old =
+ containerApiBag.unregister(
+ holder.api.getEndPoint().method()[0],
+ getActualPath(apiInfo, holder.api.getEndPoint().path()[0]));
if (old instanceof Closeable) {
closeWhileHandlingException((Closeable) old);
}
}
} else {
- //ADDED or UPDATED
+ // ADDED or UPDATED
PluginMetaHolder info = newState.get(e.getKey());
List<String> errs = new ArrayList<>();
- ApiInfo apiInfo = new ApiInfo(info,errs);
+ ApiInfo apiInfo = new ApiInfo(info, errs);
if (!errs.isEmpty()) {
log.error(StrUtils.join(errs, ','));
continue;
@@ -218,31 +225,31 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
final ApiInfo apiInfoFinal = apiInfo;
listeners.forEach(listener -> listener.added(apiInfoFinal));
} else {
- //this plugin is being updated
+ // this plugin is being updated
ApiInfo old = currentPlugins.put(e.getKey(), apiInfo);
for (ApiHolder holder : apiInfo.holders) {
- //register all new paths
+ // register all new paths
containerApiBag.register(holder, getTemplateVars(apiInfo.info));
}
final ApiInfo apiInfoFinal = apiInfo;
listeners.forEach(listener -> listener.modified(old, apiInfoFinal));
if (old != null) {
- //this is an update of the plugin. But, it is possible that
+ // this is an update of the plugin. But, it is possible that
// some paths are remved in the newer version of the plugin
for (ApiHolder oldHolder : old.holders) {
- if(apiInfo.get(oldHolder.api.getEndPoint()) == null) {
- //there was a path in the old plugin which is not present in the new one
- containerApiBag.unregister(oldHolder.getMethod(),getActualPath(old, oldHolder.getPath()));
+ if (apiInfo.get(oldHolder.api.getEndPoint()) == null) {
+ // there was a path in the old plugin which is not present in the new one
+ containerApiBag.unregister(
+ oldHolder.getMethod(), getActualPath(old, oldHolder.getPath()));
}
}
if (old instanceof Closeable) {
- //close the old instance of the plugin
+ // close the old instance of the plugin
closeWhileHandlingException((Closeable) old);
}
}
}
}
-
}
}
@@ -269,13 +276,12 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
api.call(req, rsp);
}
- public String getPath(){
+ public String getPath() {
return api.getEndPoint().path()[0];
}
- public SolrRequest.METHOD getMethod(){
+ public SolrRequest.METHOD getMethod() {
return api.getEndPoint().method()[0];
-
}
}
@@ -284,8 +290,7 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
private final PluginMetaHolder holder;
- @JsonProperty
- private final PluginMeta info;
+ @JsonProperty private final PluginMeta info;
@JsonProperty(value = "package")
public final String pkg;
@@ -297,8 +302,8 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
ApiHolder get(EndPoint endPoint) {
for (ApiHolder holder : holders) {
EndPoint e = holder.api.getEndPoint();
- if(Objects.equals(endPoint.method()[0] , e.method()[0]) &&
- Objects.equals(endPoint.path()[0], e.path()[0])) {
+ if (Objects.equals(endPoint.method()[0], e.method()[0])
+ && Objects.equals(endPoint.path()[0], e.path()[0])) {
return holder;
}
}
@@ -312,17 +317,19 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
public PluginMeta getInfo() {
return info.copy();
}
+
public ApiInfo(PluginMetaHolder infoHolder, List<String> errs) {
this.holder = infoHolder;
this.info = infoHolder.meta;
PluginInfo.ClassName klassInfo = new PluginInfo.ClassName(info.klass);
pkg = klassInfo.pkg;
if (pkg != null) {
- Optional<PackageLoader.Package.Version> ver = coreContainer.getPackageLoader().getPackageVersion(pkg, info.version);
+ Optional<PackageLoader.Package.Version> ver =
+ coreContainer.getPackageLoader().getPackageVersion(pkg, info.version);
if (ver.isEmpty()) {
- //may be we are a bit early. Do a refresh and try again
- coreContainer.getPackageLoader().getPackageAPI().refreshPackages(null);
- ver = coreContainer.getPackageLoader().getPackageVersion(pkg, info.version);
+ // may be we are a bit early. Do a refresh and try again
+ coreContainer.getPackageLoader().getPackageAPI().refreshPackages(null);
+ ver = coreContainer.getPackageLoader().getPackageVersion(pkg, info.version);
}
if (ver.isEmpty()) {
PackageLoader.Package p = coreContainer.getPackageLoader().getPackage(pkg);
@@ -330,7 +337,13 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
errs.add("Invalid package " + klassInfo.pkg);
return;
} else {
- errs.add("No such package version:" + pkg + ":" + info.version + " . available versions :" + p.allVersions());
+ errs.add(
+ "No such package version:"
+ + pkg
+ + ":"
+ + info.version
+ + " . available versions :"
+ + p.allVersions());
return;
}
}
@@ -369,9 +382,8 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
List<String> pathSegments = StrUtils.splitSmart(endPoint.path()[0], '/', true);
PathTrie.replaceTemplates(pathSegments, getTemplateVars(info));
if (V2HttpCall.knownPrefixes.contains(pathSegments.get(0))) {
- errs.add("path must not have a prefix: "+pathSegments.get(0));
+ errs.add("path must not have a prefix: " + pathSegments.get(0));
}
-
}
} catch (Exception e) {
errs.add(e.toString());
@@ -379,9 +391,11 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
if (!errs.isEmpty()) return;
Constructor<?> constructor = klas.getConstructors()[0];
- if (constructor.getParameterTypes().length > 1 ||
- (constructor.getParameterTypes().length == 1 && constructor.getParameterTypes()[0] != CoreContainer.class)) {
- errs.add("Must have a no-arg constructor or CoreContainer constructor and it must not be a non static inner class");
+ if (constructor.getParameterTypes().length > 1
+ || (constructor.getParameterTypes().length == 1
+ && constructor.getParameterTypes()[0] != CoreContainer.class)) {
+ errs.add(
+ "Must have a no-arg constructor or CoreContainer constructor and it must not be a non static inner class");
return;
}
if (!Modifier.isPublic(constructor.getModifiers())) {
@@ -396,18 +410,20 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
Constructor<?> constructor = klas.getConstructors()[0];
if (constructor.getParameterTypes().length == 0) {
instance = constructor.newInstance();
- } else if (constructor.getParameterTypes().length == 1 && constructor.getParameterTypes()[0] == CoreContainer.class) {
+ } else if (constructor.getParameterTypes().length == 1
+ && constructor.getParameterTypes()[0] == CoreContainer.class) {
instance = constructor.newInstance(coreContainer);
} else {
throw new RuntimeException("Must have a no-arg constructor or CoreContainer constructor ");
}
if (instance instanceof ConfigurablePlugin) {
- Class<? extends MapWriter> c = getConfigClass((ConfigurablePlugin<? extends MapWriter>) instance);
+ Class<? extends MapWriter> c =
+ getConfigClass((ConfigurablePlugin<? extends MapWriter>) instance);
if (c != null) {
- Map<String, Object> original = (Map<String, Object>) holder.original.getOrDefault("config", Collections.emptyMap());
+ Map<String, Object> original =
+ (Map<String, Object>) holder.original.getOrDefault("config", Collections.emptyMap());
holder.meta.config = mapper.readValue(Utils.toJSON(original), c);
((ConfigurablePlugin<MapWriter>) instance).configure(holder.meta.config);
-
}
}
if (instance instanceof ResourceLoaderAware) {
@@ -422,12 +438,9 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
holders.add(new ApiHolder((AnnotatedApi) api));
}
}
-
}
- /**
- * Get the generic type of a {@link ConfigurablePlugin}
- */
+ /** Get the generic type of a {@link ConfigurablePlugin} */
@SuppressWarnings("unchecked")
public static <T extends MapWriter> Class<T> getConfigClass(ConfigurablePlugin<T> o) {
Class<?> klas = o.getClass();
@@ -437,9 +450,11 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
if (type instanceof ParameterizedType) {
ParameterizedType parameterizedType = (ParameterizedType) type;
Type rawType = parameterizedType.getRawType();
- if (rawType == ConfigurablePlugin.class ||
+ if (rawType == ConfigurablePlugin.class
+ ||
// or if a super interface is a ConfigurablePlugin
- ((rawType instanceof Class<?>) && ConfigurablePlugin.class.isAssignableFrom((Class<?>) rawType))) {
+ ((rawType instanceof Class<?>)
+ && ConfigurablePlugin.class.isAssignableFrom((Class<?>) rawType))) {
return (Class<T>) parameterizedType.getActualTypeArguments()[0];
}
@@ -450,32 +465,34 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
return null;
}
- public ApiInfo createInfo(Map<String,Object> info, List<String> errs) throws IOException {
+ public ApiInfo createInfo(Map<String, Object> info, List<String> errs) throws IOException {
return new ApiInfo(new PluginMetaHolder(info), errs);
-
}
public enum Diff {
- ADDED, REMOVED, UNCHANGED, UPDATED
+ ADDED,
+ REMOVED,
+ UNCHANGED,
+ UPDATED
}
public static Map<String, Diff> compareMaps(Map<String, ?> a, Map<String, ?> b) {
- if(a.isEmpty() && b.isEmpty()) return null;
+ if (a.isEmpty() && b.isEmpty()) return null;
Map<String, Diff> result = new HashMap<>(Math.max(a.size(), b.size()));
- a.forEach((k, v) -> {
- Object newVal = b.get(k);
- if (newVal == null) {
- result.put(k, Diff.REMOVED);
- return;
- }
- result.put(k, Objects.equals(v, newVal) ?
- Diff.UNCHANGED :
- Diff.UPDATED);
- });
+ a.forEach(
+ (k, v) -> {
+ Object newVal = b.get(k);
+ if (newVal == null) {
+ result.put(k, Diff.REMOVED);
+ return;
+ }
+ result.put(k, Objects.equals(v, newVal) ? Diff.UNCHANGED : Diff.UPDATED);
+ });
- b.forEach((k, v) -> {
- if (a.get(k) == null) result.put(k, Diff.ADDED);
- });
+ b.forEach(
+ (k, v) -> {
+ if (a.get(k) == null) result.put(k, Diff.ADDED);
+ });
for (Diff value : result.values()) {
if (value != Diff.UNCHANGED) return result;
@@ -484,9 +501,7 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
return null;
}
- /**
- * Listener for notifications about added / deleted / modified plugins.
- */
+ /** Listener for notifications about added / deleted / modified plugins. */
public interface PluginRegistryListener {
/** Called when a new plugin is added. */
@@ -497,6 +512,5 @@ public class ContainerPluginsRegistry implements ClusterPropertiesListener, MapW
/** Called when an existing plugin is replaced. */
void modified(ApiInfo old, ApiInfo replacement);
-
}
}
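PluginRegistryListener, reformatted above, is the hook for reacting to container plugin lifecycle changes. A hypothetical logging listener covering the three callbacks used in this file:

import org.apache.solr.api.ContainerPluginsRegistry.ApiInfo;
import org.apache.solr.api.ContainerPluginsRegistry.PluginRegistryListener;

// Hypothetical listener that just prints plugin lifecycle events.
public class LoggingPluginListener implements PluginRegistryListener {

  @Override
  public void added(ApiInfo plugin) {
    System.out.println("plugin added: " + plugin.getInfo().klass);
  }

  @Override
  public void deleted(ApiInfo plugin) {
    System.out.println("plugin deleted: " + plugin.getInfo().klass);
  }

  @Override
  public void modified(ApiInfo old, ApiInfo replacement) {
    System.out.println("plugin replaced: " + replacement.getInfo().klass);
  }
}

// Registered with the registry via registerListener(...), e.g.:
//   registry.registerListener(new LoggingPluginListener());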
diff --git a/solr/core/src/java/org/apache/solr/api/EndPoint.java b/solr/core/src/java/org/apache/solr/api/EndPoint.java
index f0358dc..0c1ae66 100644
--- a/solr/core/src/java/org/apache/solr/api/EndPoint.java
+++ b/solr/core/src/java/org/apache/solr/api/EndPoint.java
@@ -21,7 +21,6 @@ import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
-
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.security.PermissionNameProvider;
diff --git a/solr/core/src/java/org/apache/solr/api/PayloadObj.java b/solr/core/src/java/org/apache/solr/api/PayloadObj.java
index df4c1e4..7e908a6 100644
--- a/solr/core/src/java/org/apache/solr/api/PayloadObj.java
+++ b/solr/core/src/java/org/apache/solr/api/PayloadObj.java
@@ -21,32 +21,35 @@ import org.apache.solr.common.util.CommandOperation;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
-/**
- * Holds the deserialized object for each command and also holds request , response objects
- */
+/** Holds the deserialized object for each command and also holds request , response objects */
public class PayloadObj<T> extends CommandOperation {
- //the deserialized object parameter
- private T obj;
- final SolrQueryRequest req;
- final SolrQueryResponse rsp;
-
- public PayloadObj(String operationName, Object originalMetadata, T obj, SolrQueryRequest req, SolrQueryResponse rsp) {
- super(operationName, originalMetadata);
- this.obj = obj;
- this.req = req;
- this.rsp = rsp;
- }
-
- public T get() {
- return obj;
- }
-
- public SolrQueryRequest getRequest() {
- return req;
- }
-
- public SolrQueryResponse getResponse() {
- return rsp;
- }
+ // the deserialized object parameter
+ private T obj;
+ final SolrQueryRequest req;
+ final SolrQueryResponse rsp;
+
+ public PayloadObj(
+ String operationName,
+ Object originalMetadata,
+ T obj,
+ SolrQueryRequest req,
+ SolrQueryResponse rsp) {
+ super(operationName, originalMetadata);
+ this.obj = obj;
+ this.req = req;
+ this.rsp = rsp;
+ }
+
+ public T get() {
+ return obj;
+ }
+
+ public SolrQueryRequest getRequest() {
+ return req;
+ }
+
+ public SolrQueryResponse getResponse() {
+ return rsp;
+ }
}
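PayloadObj extends CommandOperation, so a command method can record validation errors directly on its payload and AnnotatedApi will surface them as a BAD_REQUEST (see checkForErrorInPayload earlier in this diff). A hypothetical variation on the earlier DemoEndpoint sketch:

import java.util.Map;
import org.apache.solr.api.Command;
import org.apache.solr.api.EndPoint;
import org.apache.solr.api.PayloadObj;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.security.PermissionNameProvider;

// Hypothetical endpoint showing payload validation via the inherited addError(...).
@EndPoint(
    method = SolrRequest.METHOD.POST,
    path = "/plugin/limits",
    permission = PermissionNameProvider.Name.ALL)
public class LimitsEndpoint {

  @Command(name = "set-limit")
  public void setLimit(PayloadObj<Map<String, Object>> payload) {
    Object max = payload.get().get("max");
    if (!(max instanceof Number)) {
      // Errors recorded here are captured and returned as a 400 response.
      payload.addError("'max' must be a number");
      return;
    }
    payload.getResponse().add("max", max);
  }
}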
diff --git a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
index effd8b1..a463ce3 100644
--- a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
+++ b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
@@ -17,9 +17,17 @@
package org.apache.solr.api;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.servlet.SolrDispatchFilter.Action.*;
+
import com.google.common.collect.ImmutableSet;
import io.opentracing.Span;
import io.opentracing.tag.Tags;
+import java.lang.invoke.MethodHandles;
+import java.util.*;
+import java.util.function.Supplier;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.annotation.SolrThreadSafe;
@@ -44,15 +52,6 @@ import org.apache.solr.servlet.SolrRequestParsers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.lang.invoke.MethodHandles;
-import java.util.*;
-import java.util.function.Supplier;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.*;
-
// class that handle the '/v2' path
@SolrThreadSafe
public class V2HttpCall extends HttpSolrCall {
@@ -61,27 +60,34 @@ public class V2HttpCall extends HttpSolrCall {
private List<String> pathSegments;
private String prefix;
HashMap<String, String> parts = new HashMap<>();
- static final Set<String> knownPrefixes = ImmutableSet.of("cluster", "node", "collections", "cores", "c");
-
- public V2HttpCall(SolrDispatchFilter solrDispatchFilter, CoreContainer cc,
- HttpServletRequest request, HttpServletResponse response, boolean retry) {
+ static final Set<String> knownPrefixes =
+ ImmutableSet.of("cluster", "node", "collections", "cores", "c");
+
+ public V2HttpCall(
+ SolrDispatchFilter solrDispatchFilter,
+ CoreContainer cc,
+ HttpServletRequest request,
+ HttpServletResponse response,
+ boolean retry) {
super(solrDispatchFilter, cc, request, response, retry);
}
protected void init() throws Exception {
queryParams = SolrRequestParsers.parseQueryString(req.getQueryString());
String path = this.path;
- final String fullPath = path = path.substring(7);//strip off '/____v2'
+ final String fullPath = path = path.substring(7); // strip off '/____v2'
try {
pathSegments = PathTrie.getPathSegments(path);
- if (pathSegments.size() == 0 || (pathSegments.size() == 1 && path.endsWith(CommonParams.INTROSPECT))) {
- api = new Api(null) {
- @Override
- public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
- rsp.add("documentation", "https://solr.apache.org/guide/v2-api.html");
- rsp.add("description", "V2 API root path");
- }
- };
+ if (pathSegments.size() == 0
+ || (pathSegments.size() == 1 && path.endsWith(CommonParams.INTROSPECT))) {
+ api =
+ new Api(null) {
+ @Override
+ public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
+ rsp.add("documentation", "https://solr.apache.org/guide/v2-api.html");
+ rsp.add("description", "V2 API root path");
+ }
+ };
initAdminRequest(path);
return;
} else {
@@ -103,21 +109,24 @@ public class V2HttpCall extends HttpSolrCall {
if ("c".equals(prefix) || "collections".equals(prefix)) {
origCorename = pathSegments.get(1);
- DocCollection collection = resolveDocCollection(queryParams.get(COLLECTION_PROP, origCorename));
+ DocCollection collection =
+ resolveDocCollection(queryParams.get(COLLECTION_PROP, origCorename));
if (collection == null) {
- if ( ! path.endsWith(CommonParams.INTROSPECT)) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such collection or alias");
+ if (!path.endsWith(CommonParams.INTROSPECT)) {
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST, "no such collection or alias");
}
} else {
boolean isPreferLeader = (path.endsWith("/update") || path.contains("/update/"));
core = getCoreByCollection(collection.getName(), isPreferLeader);
if (core == null) {
- //this collection exists , but this node does not have a replica for that collection
+ // this collection exists , but this node does not have a replica for that collection
extractRemotePath(collection.getName(), collection.getName());
if (action == REMOTEQUERY) {
coreUrl = coreUrl.replace("/solr/", "/solr/____v2/c/");
- this.path = path = path.substring(prefix.length() + collection.getName().length() + 2);
+ this.path =
+ path = path.substring(prefix.length() + collection.getName().length() + 2);
return;
}
}
@@ -127,8 +136,8 @@ public class V2HttpCall extends HttpSolrCall {
core = cores.getCore(origCorename);
} else {
api = getApiInfo(cores.getRequestHandlers(), path, req.getMethod(), fullPath, parts);
- if(api != null) {
- //custom plugin
+ if (api != null) {
+ // custom plugin
initAdminRequest(path);
return;
}
@@ -139,7 +148,9 @@ public class V2HttpCall extends HttpSolrCall {
initAdminRequest(path);
return;
} else {
- throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "no core retrieved for core name: " + origCorename + ". Path : "+ path);
+ throw new SolrException(
+ SolrException.ErrorCode.NOT_FOUND,
+ "no core retrieved for core name: " + origCorename + ". Path : " + path);
}
}
@@ -183,29 +194,37 @@ public class V2HttpCall extends HttpSolrCall {
}
/**
- * Lookup the collection from the collection string (maybe comma delimited).
- * Also sets {@link #collectionsList} by side-effect.
- * if {@code secondTry} is false then we'll potentially recursively try this all one more time while ensuring
- * the alias and collection info is sync'ed from ZK.
+ * Lookup the collection from the collection string (maybe comma delimited). Also sets {@link
+ * #collectionsList} by side-effect. if {@code secondTry} is false then we'll potentially
+ * recursively try this all one more time while ensuring the alias and collection info is sync'ed
+ * from ZK.
*/
protected DocCollection resolveDocCollection(String collectionStr) {
if (!cores.isZooKeeperAware()) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Solr not running in cloud mode ");
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST, "Solr not running in cloud mode ");
}
ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();
- Supplier<DocCollection> logic = () -> {
- this.collectionsList = resolveCollectionListOrAlias(collectionStr); // side-effect
- if (collectionsList.size() > 1) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Request must be sent to a single collection " +
- "or an alias that points to a single collection," +
- " but '" + collectionStr + "' resolves to " + this.collectionsList);
- }
- String collectionName = collectionsList.get(0); // first
- //TODO an option to choose another collection in the list if can't find a local replica of the first?
+ Supplier<DocCollection> logic =
+ () -> {
+ this.collectionsList = resolveCollectionListOrAlias(collectionStr); // side-effect
+ if (collectionsList.size() > 1) {
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Request must be sent to a single collection "
+ + "or an alias that points to a single collection,"
+ + " but '"
+ + collectionStr
+ + "' resolves to "
+ + this.collectionsList);
+ }
+ String collectionName = collectionsList.get(0); // first
+ // TODO an option to choose another collection in the list if can't find a local replica
+ // of the first?
- return zkStateReader.getClusterState().getCollectionOrNull(collectionName);
- };
+ return zkStateReader.getClusterState().getCollectionOrNull(collectionName);
+ };
DocCollection docCollection = logic.get();
if (docCollection != null) {
@@ -217,15 +236,17 @@ public class V2HttpCall extends HttpSolrCall {
zkStateReader.forceUpdateCollection(collectionsList.get(0));
} catch (Exception e) {
log.error("Error trying to update state while resolving collection.", e);
- //don't propagate exception on purpose
+ // don't propagate exception on purpose
}
return logic.get();
}
- public static Api getApiInfo(PluginBag<SolrRequestHandler> requestHandlers,
- String path, String method,
- String fullPath,
- Map<String, String> parts) {
+ public static Api getApiInfo(
+ PluginBag<SolrRequestHandler> requestHandlers,
+ String path,
+ String method,
+ String fullPath,
+ Map<String, String> parts) {
fullPath = fullPath == null ? path : fullPath;
Api api = requestHandlers.v2lookup(path, method, parts);
if (api == null && path.endsWith(CommonParams.INTROSPECT)) {
@@ -244,59 +265,69 @@ public class V2HttpCall extends HttpSolrCall {
Api x = requestHandlers.v2lookup(path, m, parts);
if (x != null) apis.put(m, x);
}
- api = new CompositeApi(new Api(ApiBag.EMPTY_SPEC) {
- @Override
- public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
- String method = req.getParams().get("method");
- Set<Api> added = new HashSet<>();
- for (Map.Entry<String, Api> e : apis.entrySet()) {
- if (method == null || e.getKey().equals(method)) {
- if (!added.contains(e.getValue())) {
- e.getValue().call(req, rsp);
- added.add(e.getValue());
- }
- }
- }
- RequestHandlerUtils.addExperimentalFormatWarning(rsp);
- }
- });
- getSubPathApi(requestHandlers,path, fullPath, (CompositeApi) api);
+ api =
+ new CompositeApi(
+ new Api(ApiBag.EMPTY_SPEC) {
+ @Override
+ public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
+ String method = req.getParams().get("method");
+ Set<Api> added = new HashSet<>();
+ for (Map.Entry<String, Api> e : apis.entrySet()) {
+ if (method == null || e.getKey().equals(method)) {
+ if (!added.contains(e.getValue())) {
+ e.getValue().call(req, rsp);
+ added.add(e.getValue());
+ }
+ }
+ }
+ RequestHandlerUtils.addExperimentalFormatWarning(rsp);
+ }
+ });
+ getSubPathApi(requestHandlers, path, fullPath, (CompositeApi) api);
}
-
return api;
}
- private static CompositeApi getSubPathApi(PluginBag<SolrRequestHandler> requestHandlers, String path, String fullPath, CompositeApi compositeApi) {
+ private static CompositeApi getSubPathApi(
+ PluginBag<SolrRequestHandler> requestHandlers,
+ String path,
+ String fullPath,
+ CompositeApi compositeApi) {
- String newPath = path.endsWith(CommonParams.INTROSPECT) ? path.substring(0, path.length() - CommonParams.INTROSPECT.length()) : path;
+ String newPath =
+ path.endsWith(CommonParams.INTROSPECT)
+ ? path.substring(0, path.length() - CommonParams.INTROSPECT.length())
+ : path;
Map<String, Set<String>> subpaths = new LinkedHashMap<>();
getSubPaths(newPath, requestHandlers.getApiBag(), subpaths);
final Map<String, Set<String>> subPaths = subpaths;
if (subPaths.isEmpty()) return null;
- return compositeApi.add(new Api(() -> ValidatingJsonMap.EMPTY) {
- @Override
- public void call(SolrQueryRequest req1, SolrQueryResponse rsp) {
- String prefix = null;
- prefix = fullPath.endsWith(CommonParams.INTROSPECT) ?
- fullPath.substring(0, fullPath.length() - CommonParams.INTROSPECT.length()) :
- fullPath;
- LinkedHashMap<String, Set<String>> result = new LinkedHashMap<>(subPaths.size());
- for (Map.Entry<String, Set<String>> e : subPaths.entrySet()) {
- if (e.getKey().endsWith(CommonParams.INTROSPECT)) continue;
- result.put(prefix + e.getKey(), e.getValue());
- }
+ return compositeApi.add(
+ new Api(() -> ValidatingJsonMap.EMPTY) {
+ @Override
+ public void call(SolrQueryRequest req1, SolrQueryResponse rsp) {
+ String prefix = null;
+ prefix =
+ fullPath.endsWith(CommonParams.INTROSPECT)
+ ? fullPath.substring(0, fullPath.length() - CommonParams.INTROSPECT.length())
+ : fullPath;
+ LinkedHashMap<String, Set<String>> result = new LinkedHashMap<>(subPaths.size());
+ for (Map.Entry<String, Set<String>> e : subPaths.entrySet()) {
+ if (e.getKey().endsWith(CommonParams.INTROSPECT)) continue;
+ result.put(prefix + e.getKey(), e.getValue());
+ }
- @SuppressWarnings({"unchecked"})
- Map<Object, Object> m = (Map<Object, Object>) rsp.getValues().get("availableSubPaths");
- if(m != null){
- m.putAll(result);
- } else {
- rsp.add("availableSubPaths", result);
- }
- }
- });
+ @SuppressWarnings({"unchecked"})
+ Map<Object, Object> m = (Map<Object, Object>) rsp.getValues().get("availableSubPaths");
+ if (m != null) {
+ m.putAll(result);
+ } else {
+ rsp.add("availableSubPaths", result);
+ }
+ }
+ });
}
private static void getSubPaths(String path, ApiBag bag, Map<String, Set<String>> pathsVsMethod) {
@@ -307,7 +338,8 @@ public class V2HttpCall extends HttpSolrCall {
registry.lookup(path, new HashMap<>(), subPaths);
for (String subPath : subPaths) {
Set<String> supportedMethods = pathsVsMethod.get(subPath);
- if (supportedMethods == null) pathsVsMethod.put(subPath, supportedMethods = new HashSet<>());
+ if (supportedMethods == null)
+ pathsVsMethod.put(subPath, supportedMethods = new HashSet<>());
supportedMethods.add(m.toString());
}
}
@@ -331,7 +363,6 @@ public class V2HttpCall extends HttpSolrCall {
for (Api api : apis) {
api.call(req, rsp);
}
-
}
public CompositeApi add(Api api) {
@@ -353,8 +384,11 @@ public class V2HttpCall extends HttpSolrCall {
protected void execute(SolrQueryResponse rsp) {
SolrCore.preDecorateResponse(solrReq, rsp);
if (api == null) {
- rsp.setException(new SolrException(SolrException.ErrorCode.NOT_FOUND,
- "Cannot find correspond api for the path : " + solrReq.getContext().get(CommonParams.PATH)));
+ rsp.setException(
+ new SolrException(
+ SolrException.ErrorCode.NOT_FOUND,
+ "Cannot find correspond api for the path : "
+ + solrReq.getContext().get(CommonParams.PATH)));
} else {
try {
api.call(solrReq, rsp);
@@ -408,10 +442,7 @@ public class V2HttpCall extends HttpSolrCall {
span.setOperationName(verb + ":" + path);
}
- /**
- * Example:
- * /c/collection1/ and template map collection->collection1 produces /c/{collection}.
- */
+ /** Example: /c/collection1/ and template map collection->collection1 produces /c/{collection}. */
private String computeEndpointPath() {
// It's not ideal to compute this; let's try to transition away from hitting this code path
// by using Annotation APIs
@@ -448,7 +479,7 @@ public class V2HttpCall extends HttpSolrCall {
return api;
}
- public Map<String,String> getUrlParts(){
+ public Map<String, String> getUrlParts() {
return parts;
}
diff --git a/solr/core/src/java/org/apache/solr/api/package-info.java b/solr/core/src/java/org/apache/solr/api/package-info.java
index c3574c7..6e4ddf9 100644
--- a/solr/core/src/java/org/apache/solr/api/package-info.java
+++ b/solr/core/src/java/org/apache/solr/api/package-info.java
@@ -14,8 +14,5 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-/**
- * Commonly used classes for Solr V2 API.
- */
+/** Commonly used classes for Solr V2 API. */
package org.apache.solr.api;
-
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
index 1066c3e..4a39487 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
@@ -28,7 +28,6 @@ import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import java.util.function.Supplier;
-
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.lucene.search.TotalHits.Relation;
import org.apache.solr.client.solrj.SolrClient;
@@ -74,7 +73,8 @@ public class EmbeddedSolrServer extends SolrClient {
private boolean containerIsLocal = false;
public enum RequestWriterSupplier {
- JavaBin(() -> new BinaryRequestWriter()), XML(() -> new RequestWriter());
+ JavaBin(() -> new BinaryRequestWriter()),
+ XML(() -> new RequestWriter());
private Supplier<RequestWriter> supplier;
@@ -90,7 +90,7 @@ public class EmbeddedSolrServer extends SolrClient {
/**
* Create an EmbeddedSolrServer using a given solr home directory
*
- * @param solrHome the solr home directory
+ * @param solrHome the solr home directory
* @param defaultCoreName the core to route requests to by default (optional)
*/
public EmbeddedSolrServer(Path solrHome, String defaultCoreName) {
@@ -101,7 +101,7 @@ public class EmbeddedSolrServer extends SolrClient {
/**
* Create an EmbeddedSolrServer using a NodeConfig
*
- * @param nodeConfig the configuration
+ * @param nodeConfig the configuration
* @param defaultCoreName the core to route requests to by default (optional)
*/
public EmbeddedSolrServer(NodeConfig nodeConfig, String defaultCoreName) {
@@ -114,9 +114,7 @@ public class EmbeddedSolrServer extends SolrClient {
return cc;
}
- /**
- * Create an EmbeddedSolrServer wrapping a particular SolrCore
- */
+ /** Create an EmbeddedSolrServer wrapping a particular SolrCore */
public EmbeddedSolrServer(SolrCore core) {
this(core.getCoreContainer(), core.getName());
}
@@ -125,7 +123,7 @@ public class EmbeddedSolrServer extends SolrClient {
* Create an EmbeddedSolrServer wrapping a CoreContainer.
*
* @param coreContainer the core container
- * @param coreName the core to route requests to by default (optional)
+ * @param coreName the core to route requests to by default (optional)
*/
public EmbeddedSolrServer(CoreContainer coreContainer, String coreName) {
this(coreContainer, coreName, RequestWriterSupplier.JavaBin);
@@ -134,15 +132,12 @@ public class EmbeddedSolrServer extends SolrClient {
/**
* Create an EmbeddedSolrServer wrapping a CoreContainer.
*
- * @param coreContainer
- * the core container
- * @param coreName
- * the core to route requests to by default
- * @param supplier
- * the supplier used to create a {@link RequestWriter}
+ * @param coreContainer the core container
+ * @param coreName the core to route requests to by default
+ * @param supplier the supplier used to create a {@link RequestWriter}
*/
- public EmbeddedSolrServer(CoreContainer coreContainer, String coreName,
- RequestWriterSupplier supplier) {
+ public EmbeddedSolrServer(
+ CoreContainer coreContainer, String coreName, RequestWriterSupplier supplier) {
if (coreContainer == null) {
throw new NullPointerException("CoreContainer instance required");
}
@@ -156,7 +151,8 @@ public class EmbeddedSolrServer extends SolrClient {
// It *should* be able to convert the response directly into a named list.
@Override
- public NamedList<Object> request(SolrRequest<?> request, String coreName) throws SolrServerException, IOException {
+ public NamedList<Object> request(SolrRequest<?> request, String coreName)
+ throws SolrServerException, IOException {
String path = request.getPath();
if (path == null || !path.startsWith("/")) {
@@ -166,7 +162,8 @@ public class EmbeddedSolrServer extends SolrClient {
SolrRequestHandler handler = coreContainer.getRequestHandler(path);
if (handler != null) {
try {
- SolrQueryRequest req = _parser.buildRequestFrom(null, request.getParams(), getContentStreams(request));
+ SolrQueryRequest req =
+ _parser.buildRequestFrom(null, request.getParams(), getContentStreams(request));
req.getContext().put("httpMethod", request.getMethod().name());
req.getContext().put(PATH, path);
SolrQueryResponse resp = new SolrQueryResponse();
@@ -183,7 +180,8 @@ public class EmbeddedSolrServer extends SolrClient {
if (coreName == null) {
coreName = this.coreName;
if (coreName == null) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
"No core specified on request and no default core has been set.");
}
}
@@ -247,13 +245,15 @@ public class EmbeddedSolrServer extends SolrClient {
}
};
-
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
- createJavaBinCodec(callback, resolver).setWritableDocFields(resolver).marshal(rsp.getValues(), out);
+ createJavaBinCodec(callback, resolver)
+ .setWritableDocFields(resolver)
+ .marshal(rsp.getValues(), out);
try (InputStream in = out.toInputStream()) {
@SuppressWarnings({"unchecked"})
- NamedList<Object> resolved = (NamedList<Object>) new JavaBinCodec(resolver).unmarshal(in);
+ NamedList<Object> resolved =
+ (NamedList<Object>) new JavaBinCodec(resolver).unmarshal(in);
return resolved;
}
}
@@ -300,30 +300,32 @@ public class EmbeddedSolrServer extends SolrClient {
final byte[] buf = baos.toByteArray();
if (buf.length > 0) {
- return Collections.singleton(new ContentStreamBase() {
+ return Collections.singleton(
+ new ContentStreamBase() {
- @Override
- public InputStream getStream() throws IOException {
- return new ByteArrayInputStream(buf);
- }
+ @Override
+ public InputStream getStream() throws IOException {
+ return new ByteArrayInputStream(buf);
+ }
- @Override
- public String getContentType() {
- return cType;
- }
- });
+ @Override
+ public String getContentType() {
+ return cType;
+ }
+ });
}
return null;
}
- private JavaBinCodec createJavaBinCodec(final StreamingResponseCallback callback, final BinaryResponseWriter.Resolver resolver) {
+ private JavaBinCodec createJavaBinCodec(
+ final StreamingResponseCallback callback, final BinaryResponseWriter.Resolver resolver) {
return new JavaBinCodec(resolver) {
@Override
public void writeSolrDocument(SolrDocument doc) {
callback.streamSolrDocument(doc);
- //super.writeSolrDocument( doc, fields );
+ // super.writeSolrDocument( doc, fields );
}
@Override
@@ -338,7 +340,6 @@ public class EmbeddedSolrServer extends SolrClient {
callback.streamDocListInfo(docs.getNumFound(), docs.getStart(), docs.getMaxScore());
super.writeSolrDocumentList(docs);
}
-
};
}
@@ -349,12 +350,9 @@ public class EmbeddedSolrServer extends SolrClient {
}
throw new SolrServerException(rsp.getException());
}
-
}
- /**
- * Closes any resources created by this instance
- */
+ /** Closes any resources created by this instance */
@Override
public void close() throws IOException {
if (containerIsLocal) {
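The reformatted constructors and request plumbing above are easiest to read next to a typical caller. A minimal sketch, assuming an existing solr home on disk; the path and core name are hypothetical placeholders:

import java.nio.file.Path;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;

public class EmbeddedSolrServerSketch {
  public static void main(String[] args) throws Exception {
    Path solrHome = Path.of("/path/to/solr/home"); // hypothetical
    try (EmbeddedSolrServer server = new EmbeddedSolrServer(solrHome, "techproducts")) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "example-1");
      server.add(doc);    // routed to the default core given above
      server.commit();
      QueryResponse rsp = server.query(new SolrQuery("*:*"));
      System.out.println("hits: " + rsp.getResults().getNumFound());
    }
  }
}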
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
index e4a0547..b61a534 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
@@ -16,12 +16,11 @@
*/
package org.apache.solr.client.solrj.embedded;
-import org.eclipse.jetty.servlet.ServletHolder;
-
-import javax.servlet.Filter;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;
+import javax.servlet.Filter;
+import org.eclipse.jetty.servlet.ServletHolder;
public class JettyConfig {
@@ -34,9 +33,8 @@ public class JettyConfig {
public final boolean enableV2;
-
public final boolean stopAtShutdown;
-
+
public final Long waitForLoadingCoresToFinishMs;
public final Map<ServletHolder, String> extraServlets;
@@ -44,12 +42,20 @@ public class JettyConfig {
public final Map<Class<? extends Filter>, String> extraFilters;
public final SSLConfig sslConfig;
-
+
public final int portRetryTime;
- private JettyConfig(boolean onlyHttp1, int port, int portRetryTime , String context, boolean stopAtShutdown,
- Long waitForLoadingCoresToFinishMs, Map<ServletHolder, String> extraServlets,
- Map<Class<? extends Filter>, String> extraFilters, SSLConfig sslConfig, boolean enableV2) {
+ private JettyConfig(
+ boolean onlyHttp1,
+ int port,
+ int portRetryTime,
+ String context,
+ boolean stopAtShutdown,
+ Long waitForLoadingCoresToFinishMs,
+ Map<ServletHolder, String> extraServlets,
+ Map<Class<? extends Filter>, String> extraFilters,
+ SSLConfig sslConfig,
+ boolean enableV2) {
this.onlyHttp1 = onlyHttp1;
this.port = port;
this.context = context;
@@ -94,7 +100,8 @@ public class JettyConfig {
this.onlyHttp1 = useOnlyHttp1;
return this;
}
- public Builder enableV2(boolean flag){
+
+ public Builder enableV2(boolean flag) {
this.enableV2 = flag;
return this;
}
@@ -113,7 +120,7 @@ public class JettyConfig {
this.stopAtShutdown = stopAtShutdown;
return this;
}
-
+
public Builder waitForLoadingCoresToFinish(Long waitForLoadingCoresToFinishMs) {
this.waitForLoadingCoresToFinishMs = waitForLoadingCoresToFinishMs;
return this;
@@ -125,8 +132,7 @@ public class JettyConfig {
}
public Builder withServlets(Map<ServletHolder, String> servlets) {
- if (servlets != null)
- extraServlets.putAll(servlets);
+ if (servlets != null) extraServlets.putAll(servlets);
return this;
}
@@ -136,8 +142,7 @@ public class JettyConfig {
}
public Builder withFilters(Map<Class<? extends Filter>, String> filters) {
- if (filters != null)
- extraFilters.putAll(filters);
+ if (filters != null) extraFilters.putAll(filters);
return this;
}
@@ -145,18 +150,24 @@ public class JettyConfig {
this.sslConfig = sslConfig;
return this;
}
-
+
public Builder withPortRetryTime(int portRetryTime) {
this.portRetryTime = portRetryTime;
return this;
}
-
public JettyConfig build() {
- return new JettyConfig(onlyHttp1, port, portRetryTime, context, stopAtShutdown,
- waitForLoadingCoresToFinishMs, extraServlets, extraFilters, sslConfig, enableV2);
+ return new JettyConfig(
+ onlyHttp1,
+ port,
+ portRetryTime,
+ context,
+ stopAtShutdown,
+ waitForLoadingCoresToFinishMs,
+ extraServlets,
+ extraFilters,
+ sslConfig,
+ enableV2);
}
-
}
-
}
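Since the Builder is how every embedded Jetty instance gets configured, here is a short sketch of the fluent usage that the reformatted build() call above feeds; the context and flag values are typical test-style choices, not requirements:

import org.apache.solr.client.solrj.embedded.JettyConfig;

public class JettyConfigSketch {
  public static JettyConfig testConfig() {
    return JettyConfig.builder()
        .setContext("/solr")
        .setPort(0)          // 0 lets Jetty pick an ephemeral port
        .enableV2(true)      // expose the /api (v2) endpoints
        .stopAtShutdown(true)
        .build();
  }
}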
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index b0b2e99..14ee90e 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -16,17 +16,9 @@
*/
package org.apache.solr.client.solrj.embedded;
-import javax.servlet.DispatcherType;
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.UnavailableException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
+import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
@@ -51,10 +43,17 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
-
-import com.codahale.metrics.ConsoleReporter;
-import com.codahale.metrics.MetricFilter;
-import com.codahale.metrics.MetricRegistry;
+import javax.servlet.DispatcherType;
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.UnavailableException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
import org.apache.commons.io.output.NullPrintStream;
import org.apache.lucene.util.Constants;
import org.apache.solr.client.solrj.SolrClient;
@@ -132,7 +131,8 @@ public class JettySolrRunner {
private LinkedList<FilterHolder> extraFilters;
- private static final String excludePatterns = "/partials/.+,/libs/.+,/css/.+,/js/.+,/img/.+,/templates/.+";
+ private static final String excludePatterns =
+ "/partials/.+,/libs/.+,/css/.+,/js/.+,/img/.+,/templates/.+";
private int proxyPort = -1;
@@ -157,7 +157,6 @@ public class JettySolrRunner {
public long getTotalRequests() {
return nRequests.get();
-
}
/**
@@ -171,30 +170,29 @@ public class JettySolrRunner {
delays.add(new Delay(reason, count, delay));
}
- /**
- * Remove any delay introduced before.
- */
+ /** Remove any delay introduced before. */
public void unsetDelay() {
delays.clear();
}
-
@Override
- public void init(FilterConfig filterConfig) throws ServletException { }
+ public void init(FilterConfig filterConfig) throws ServletException {}
@Override
- public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException {
+ public void doFilter(
+ ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
+ throws IOException, ServletException {
nRequests.incrementAndGet();
executeDelay();
filterChain.doFilter(servletRequest, servletResponse);
}
@Override
- public void destroy() { }
+ public void destroy() {}
private void executeDelay() {
int delayMs = 0;
- for (Delay delay: delays) {
+ for (Delay delay : delays) {
log.info("Delaying {}, for reason: {}", delay.delayValue, delay.reason);
if (delay.counter.decrementAndGet() == 0) {
delayMs += delay.delayValue;
@@ -211,13 +209,12 @@ public class JettySolrRunner {
log.info("Waking up after the delay of {}ms...", delayMs);
}
}
-
}
/**
* Create a new JettySolrRunner.
*
- * After construction, you must start the jetty with {@link #start()}
+ * <p>After construction, you must start the jetty with {@link #start()}
*
* @param solrHome the solr home directory to use
* @param context the context to run in
@@ -227,13 +224,12 @@ public class JettySolrRunner {
this(solrHome, JettyConfig.builder().setContext(context).setPort(port).build());
}
-
/**
* Construct a JettySolrRunner
*
- * After construction, you must start the jetty with {@link #start()}
+ * <p>After construction, you must start the jetty with {@link #start()}
*
- * @param solrHome the base path to run from
+ * @param solrHome the base path to run from
* @param config the configuration
*/
public JettySolrRunner(String solrHome, JettyConfig config) {
@@ -243,11 +239,11 @@ public class JettySolrRunner {
/**
* Construct a JettySolrRunner
*
- * After construction, you must start the jetty with {@link #start()}
+ * <p>After construction, you must start the jetty with {@link #start()}
*
- * @param solrHome the solrHome to use
- * @param nodeProperties the container properties
- * @param config the configuration
+ * @param solrHome the solrHome to use
+ * @param nodeProperties the container properties
+ * @param config the configuration
*/
public JettySolrRunner(String solrHome, Properties nodeProperties, JettyConfig config) {
this(solrHome, nodeProperties, config, false);
@@ -256,14 +252,15 @@ public class JettySolrRunner {
/**
* Construct a JettySolrRunner
*
- * After construction, you must start the jetty with {@link #start()}
+ * <p>After construction, you must start the jetty with {@link #start()}
*
- * @param solrHome the solrHome to use
- * @param nodeProperties the container properties
- * @param config the configuration
- * @param enableProxy enables proxy feature to disable connections
+ * @param solrHome the solrHome to use
+ * @param nodeProperties the container properties
+ * @param config the configuration
+ * @param enableProxy enables proxy feature to disable connections
*/
- public JettySolrRunner(String solrHome, Properties nodeProperties, JettyConfig config, boolean enableProxy) {
+ public JettySolrRunner(
+ String solrHome, Properties nodeProperties, JettyConfig config, boolean enableProxy) {
this.enableProxy = enableProxy;
this.solrHome = solrHome;
this.config = config;
@@ -311,9 +308,11 @@ public class JettySolrRunner {
HttpConnectionFactory http1ConnectionFactory = new HttpConnectionFactory(configuration);
if (config.onlyHttp1 || !Constants.JRE_IS_MINIMUM_JAVA9) {
- connector = new ServerConnector(server, new SslConnectionFactory(sslcontext,
- http1ConnectionFactory.getProtocol()),
- http1ConnectionFactory);
+ connector =
+ new ServerConnector(
+ server,
+ new SslConnectionFactory(sslcontext, http1ConnectionFactory.getProtocol()),
+ http1ConnectionFactory);
} else {
sslcontext.setCipherComparator(HTTP2Cipher.COMPARATOR);
@@ -322,11 +321,12 @@ public class JettySolrRunner {
connector.addConnectionFactory(sslConnectionFactory);
connector.setDefaultProtocol(sslConnectionFactory.getProtocol());
- HTTP2ServerConnectionFactory http2ConnectionFactory = new HTTP2ServerConnectionFactory(configuration);
+ HTTP2ServerConnectionFactory http2ConnectionFactory =
+ new HTTP2ServerConnectionFactory(configuration);
- ALPNServerConnectionFactory alpn = new ALPNServerConnectionFactory(
- http2ConnectionFactory.getProtocol(),
- http1ConnectionFactory.getProtocol());
+ ALPNServerConnectionFactory alpn =
+ new ALPNServerConnectionFactory(
+ http2ConnectionFactory.getProtocol(), http1ConnectionFactory.getProtocol());
alpn.setDefaultProtocol(http1ConnectionFactory.getProtocol());
connector.addConnectionFactory(alpn);
connector.addConnectionFactory(http1ConnectionFactory);
@@ -336,7 +336,11 @@ public class JettySolrRunner {
if (config.onlyHttp1) {
connector = new ServerConnector(server, new HttpConnectionFactory(configuration));
} else {
- connector = new ServerConnector(server, new HttpConnectionFactory(configuration), new HTTP2CServerConnectionFactory(configuration));
+ connector =
+ new ServerConnector(
+ server,
+ new HttpConnectionFactory(configuration),
+ new HTTP2CServerConnectionFactory(configuration));
}
}
@@ -349,7 +353,11 @@ public class JettySolrRunner {
server.setSessionIdManager(new DefaultSessionIdManager(server, new Random()));
} else {
HttpConfiguration configuration = new HttpConfiguration();
- ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory(configuration), new HTTP2CServerConnectionFactory(configuration));
+ ServerConnector connector =
+ new ServerConnector(
+ server,
+ new HttpConnectionFactory(configuration),
+ new HTTP2CServerConnectionFactory(configuration));
connector.setReuseAddress(true);
connector.setPort(port);
connector.setHost("127.0.0.1");
@@ -359,74 +367,79 @@ public class JettySolrRunner {
HandlerWrapper chain;
{
- // Initialize the servlets
- final ServletContextHandler root = new ServletContextHandler(server, config.context, ServletContextHandler.SESSIONS);
-
- server.addLifeCycleListener(new LifeCycle.Listener() {
-
- @Override
- public void lifeCycleStopping(LifeCycle arg0) {
- }
-
- @Override
- public synchronized void lifeCycleStopped(LifeCycle arg0) {
- coreContainerProvider.close();
- }
-
- @Override
- public void lifeCycleStarting(LifeCycle arg0) {
-
- }
-
- @Override
- public synchronized void lifeCycleStarted(LifeCycle arg0) {
- jettyPort = getFirstConnectorPort();
- int port = jettyPort;
- if (proxyPort != -1) port = proxyPort;
- nodeProperties.setProperty("hostPort", Integer.toString(port));
- nodeProperties.setProperty("hostContext", config.context);
-
- root.getServletContext().setAttribute(SolrDispatchFilter.PROPERTIES_ATTRIBUTE, nodeProperties);
- root.getServletContext().setAttribute(SolrDispatchFilter.SOLRHOME_ATTRIBUTE, solrHome);
- SSLConfigurationsFactory.current().init(); // normally happens in jetty-ssl.xml
- coreContainerProvider = new CoreContainerProvider();
- coreContainerProvider.init(root.getServletContext());
- log.info("Jetty properties: {}", nodeProperties);
-
- debugFilter = root.addFilter(DebugFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST) );
- extraFilters = new LinkedList<>();
- for (Map.Entry<Class<? extends Filter>, String> entry : config.extraFilters.entrySet()) {
- extraFilters.add(root.addFilter(entry.getKey(), entry.getValue(), EnumSet.of(DispatcherType.REQUEST)));
- }
-
- for (Map.Entry<ServletHolder, String> entry : config.extraServlets.entrySet()) {
- root.addServlet(entry.getKey(), entry.getValue());
- }
- dispatchFilter = root.getServletHandler().newFilterHolder(Source.EMBEDDED);
- dispatchFilter.setHeldClass(SolrDispatchFilter.class);
- dispatchFilter.setInitParameter("excludePatterns", excludePatterns);
- // Map dispatchFilter in same path as in web.xml
- root.addFilter(dispatchFilter, "/*", EnumSet.of(DispatcherType.REQUEST));
-
- synchronized (JettySolrRunner.this) {
- waitOnSolr = true;
- JettySolrRunner.this.notify();
- }
- }
-
- @Override
- public void lifeCycleFailure(LifeCycle arg0, Throwable arg1) {
- System.clearProperty("hostPort");
- }
- });
- // Default servlet as a fall-through
- root.addServlet(Servlet404.class, "/");
- chain = root;
+ // Initialize the servlets
+ final ServletContextHandler root =
+ new ServletContextHandler(server, config.context, ServletContextHandler.SESSIONS);
+
+ server.addLifeCycleListener(
+ new LifeCycle.Listener() {
+
+ @Override
+ public void lifeCycleStopping(LifeCycle arg0) {}
+
+ @Override
+ public synchronized void lifeCycleStopped(LifeCycle arg0) {
+ coreContainerProvider.close();
+ }
+
+ @Override
+ public void lifeCycleStarting(LifeCycle arg0) {}
+
+ @Override
+ public synchronized void lifeCycleStarted(LifeCycle arg0) {
+ jettyPort = getFirstConnectorPort();
+ int port = jettyPort;
+ if (proxyPort != -1) port = proxyPort;
+ nodeProperties.setProperty("hostPort", Integer.toString(port));
+ nodeProperties.setProperty("hostContext", config.context);
+
+ root.getServletContext()
+ .setAttribute(SolrDispatchFilter.PROPERTIES_ATTRIBUTE, nodeProperties);
+ root.getServletContext()
+ .setAttribute(SolrDispatchFilter.SOLRHOME_ATTRIBUTE, solrHome);
+ SSLConfigurationsFactory.current().init(); // normally happens in jetty-ssl.xml
+ coreContainerProvider = new CoreContainerProvider();
+ coreContainerProvider.init(root.getServletContext());
+ log.info("Jetty properties: {}", nodeProperties);
+
+ debugFilter =
+ root.addFilter(DebugFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST));
+ extraFilters = new LinkedList<>();
+ for (Map.Entry<Class<? extends Filter>, String> entry :
+ config.extraFilters.entrySet()) {
+ extraFilters.add(
+ root.addFilter(
+ entry.getKey(), entry.getValue(), EnumSet.of(DispatcherType.REQUEST)));
+ }
+
+ for (Map.Entry<ServletHolder, String> entry : config.extraServlets.entrySet()) {
+ root.addServlet(entry.getKey(), entry.getValue());
+ }
+ dispatchFilter = root.getServletHandler().newFilterHolder(Source.EMBEDDED);
+ dispatchFilter.setHeldClass(SolrDispatchFilter.class);
+ dispatchFilter.setInitParameter("excludePatterns", excludePatterns);
+ // Map dispatchFilter in same path as in web.xml
+ root.addFilter(dispatchFilter, "/*", EnumSet.of(DispatcherType.REQUEST));
+
+ synchronized (JettySolrRunner.this) {
+ waitOnSolr = true;
+ JettySolrRunner.this.notify();
+ }
+ }
+
+ @Override
+ public void lifeCycleFailure(LifeCycle arg0, Throwable arg1) {
+ System.clearProperty("hostPort");
+ }
+ });
+ // Default servlet as a fall-through
+ root.addServlet(Servlet404.class, "/");
+ chain = root;
}
chain = injectJettyHandlers(chain);
- if(config.enableV2) {
+ if (config.enableV2) {
RewriteHandler rwh = new RewriteHandler();
rwh.setHandler(chain);
rwh.setRewriteRequestURI(true);
@@ -447,17 +460,20 @@ public class JettySolrRunner {
server.setHandler(gzipHandler);
}
- /** descendants may inject own handler chaining it to the given root
- * and then returning that own one*/
+ /**
+ * descendants may inject own handler chaining it to the given root and then returning that own
+ * one
+ */
protected HandlerWrapper injectJettyHandlers(HandlerWrapper chain) {
return chain;
}
-
/**
* @return the {@link SolrDispatchFilter} for this node
*/
- public SolrDispatchFilter getSolrDispatchFilter() { return (SolrDispatchFilter) dispatchFilter.getFilter(); }
+ public SolrDispatchFilter getSolrDispatchFilter() {
+ return (SolrDispatchFilter) dispatchFilter.getFilter();
+ }
/**
* @return the {@link CoreContainer} for this node
@@ -487,8 +503,10 @@ public class JettySolrRunner {
}
public boolean isStopped() {
- return (server.isStopped() && dispatchFilter == null) || (server.isStopped() && dispatchFilter.isStopped()
- && ((QueuedThreadPool) server.getThreadPool()).isStopped());
+ return (server.isStopped() && dispatchFilter == null)
+ || (server.isStopped()
+ && dispatchFilter.isStopped()
+ && ((QueuedThreadPool) server.getThreadPool()).isStopped());
}
// ------------------------------------------------------------------------------------------------
@@ -497,7 +515,7 @@ public class JettySolrRunner {
/**
* Start the Jetty server
*
- * If the server has been started before, it will restart using the same port
+ * <p>If the server has been started before, it will restart using the same port
*
* @throws Exception if an error occurs on startup
*/
@@ -508,10 +526,8 @@ public class JettySolrRunner {
/**
* Start the Jetty server
*
- * @param reusePort when true, will start up on the same port as used by any
- * previous runs of this JettySolrRunner. If false, will use
- * the port specified by the server's JettyConfig.
- *
+ * @param reusePort when true, will start up on the same port as used by any previous runs of this
+ * JettySolrRunner. If false, will use the port specified by the server's JettyConfig.
* @throws Exception if an error occurs on startup
*/
public synchronized void start(boolean reusePort) throws Exception {
@@ -523,7 +539,6 @@ public class JettySolrRunner {
int port = reusePort && jettyPort != -1 ? jettyPort : this.config.port;
log.info("Start Jetty (configured port={}, binding port={})", this.config.port, port);
-
// if started before, make a new server
if (startedBefore) {
waitOnSolr = false;
@@ -541,7 +556,7 @@ public class JettySolrRunner {
}
synchronized (JettySolrRunner.this) {
int cnt = 0;
- while (!waitOnSolr || !dispatchFilter.isRunning() ) {
+ while (!waitOnSolr || !dispatchFilter.isRunning()) {
this.wait(100);
if (cnt++ == 15) {
throw new RuntimeException("Jetty/Solr unresponsive");
@@ -549,7 +564,8 @@ public class JettySolrRunner {
}
}
- if (config.waitForLoadingCoresToFinishMs != null && config.waitForLoadingCoresToFinishMs > 0L) {
+ if (config.waitForLoadingCoresToFinishMs != null
+ && config.waitForLoadingCoresToFinishMs > 0L) {
waitForLoadingCoresToFinish(config.waitForLoadingCoresToFinishMs);
}
@@ -564,8 +580,8 @@ public class JettySolrRunner {
}
} finally {
- started = true;
- if (prevContext != null) {
+ started = true;
+ if (prevContext != null) {
MDC.setContextMap(prevContext);
} else {
MDC.clear();
@@ -573,7 +589,6 @@ public class JettySolrRunner {
}
}
-
private void setProtocolAndHost() {
String protocol;
@@ -615,15 +630,15 @@ public class JettySolrRunner {
}
/**
- * Traverses the cause chain looking for a BindException. Returns either a bind exception
- * that was found in the chain or the original argument.
+ * Traverses the cause chain looking for a BindException. Returns either a bind exception that was
+ * found in the chain or the original argument.
*
* @param ioe An IOException that might wrap a BindException
* @return A bind exception if present otherwise ioe
*/
Exception lookForBindException(IOException ioe) {
Exception e = ioe;
- while(e.getCause() != null && !(e == e.getCause()) && ! (e instanceof BindException)) {
+ while (e.getCause() != null && !(e == e.getCause()) && !(e instanceof BindException)) {
if (e.getCause() instanceof Exception) {
e = (Exception) e.getCause();
if (e instanceof BindException) {
@@ -639,9 +654,9 @@ public class JettySolrRunner {
*
* @throws Exception if an error occurs on shutdown
*/
- public synchronized void stop() throws Exception {
+ public synchronized void stop() throws Exception {
// Do not let Jetty/Solr pollute the MDC for this thread
- Map<String,String> prevContext = MDC.getCopyOfContextMap();
+ Map<String, String> prevContext = MDC.getCopyOfContextMap();
MDC.clear();
try {
Filter filter = dispatchFilter.getFilter();
@@ -660,7 +675,7 @@ public class JettySolrRunner {
}
// stop timeout is 0, so we will interrupt right away
- while(!qtp.isStopped()) {
+ while (!qtp.isStopped()) {
qtp.stop();
if (qtp.isStopped()) {
Thread.sleep(50);
@@ -726,9 +741,19 @@ public class JettySolrRunner {
Set<String> registryNames = metricsManager.registryNames();
for (String registryName : registryNames) {
MetricRegistry metricsRegisty = metricsManager.registry(registryName);
- try (PrintStream ps = outputDirectory == null ? new NullPrintStream() : new PrintStream(new File(outputDirectory, registryName + "_" + fileName), StandardCharsets.UTF_8)) {
- ConsoleReporter reporter = ConsoleReporter.forRegistry(metricsRegisty).
- convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS).filter(MetricFilter.ALL).outputTo(ps).build();
+ try (PrintStream ps =
+ outputDirectory == null
+ ? new NullPrintStream()
+ : new PrintStream(
+ new File(outputDirectory, registryName + "_" + fileName),
+ StandardCharsets.UTF_8)) {
+ ConsoleReporter reporter =
+ ConsoleReporter.forRegistry(metricsRegisty)
+ .convertRatesTo(TimeUnit.SECONDS)
+ .convertDurationsTo(TimeUnit.MILLISECONDS)
+ .filter(MetricFilter.ALL)
+ .outputTo(ps)
+ .build();
reporter.report();
}
}
@@ -742,16 +767,19 @@ public class JettySolrRunner {
if (getCoreContainer() != null) {
List<SolrCore> cores = getCoreContainer().getCores();
for (SolrCore core : cores) {
- NamedList<Object> coreStatus = CoreAdminOperation.getCoreStatus(getCoreContainer(), core.getName(), false);
- core.withSearcher(solrIndexSearcher -> {
- SimpleOrderedMap<Object> lukeIndexInfo = LukeRequestHandler.getIndexInfo(solrIndexSearcher.getIndexReader());
- Map<String,Object> indexInfoMap = coreStatus.toMap(new LinkedHashMap<>());
- indexInfoMap.putAll(lukeIndexInfo.toMap(new LinkedHashMap<>()));
- pw.println(JSONUtil.toJSON(indexInfoMap, 2));
-
- pw.println();
- return null;
- });
+ NamedList<Object> coreStatus =
+ CoreAdminOperation.getCoreStatus(getCoreContainer(), core.getName(), false);
+ core.withSearcher(
+ solrIndexSearcher -> {
+ SimpleOrderedMap<Object> lukeIndexInfo =
+ LukeRequestHandler.getIndexInfo(solrIndexSearcher.getIndexReader());
+ Map<String, Object> indexInfoMap = coreStatus.toMap(new LinkedHashMap<>());
+ indexInfoMap.putAll(lukeIndexInfo.toMap(new LinkedHashMap<>()));
+ pw.println(JSONUtil.toJSON(indexInfoMap, 2));
+
+ pw.println();
+ return null;
+ });
}
}
}
@@ -769,7 +797,6 @@ public class JettySolrRunner {
return ((ServerConnector) conns[0]).getLocalPort();
}
-
/**
* Returns the Local Port of the jetty Server.
*
@@ -782,32 +809,32 @@ public class JettySolrRunner {
/**
* Returns the Local Port of the jetty Server.
*
- * @param internalPort pass true to get the true jetty port rather than the proxy port if configured
- *
+ * @param internalPort pass true to get the true jetty port rather than the proxy port if
+ * configured
* @exception RuntimeException if there is no Connector
*/
public int getLocalPort(boolean internalPort) {
if (jettyPort == -1) {
throw new IllegalStateException("You cannot get the port until this instance has started");
}
- if (internalPort ) {
+ if (internalPort) {
return jettyPort;
}
return (proxyPort != -1) ? proxyPort : jettyPort;
}
/**
- * Sets the port of a local socket proxy that sits infront of this server; if set
- * then all client traffic will flow through the proxy, giving us the ability to
- * simulate network partitions very easily.
+ * Sets the port of a local socket proxy that sits infront of this server; if set then all client
+ * traffic will flow through the proxy, giving us the ability to simulate network partitions very
+ * easily.
*/
public void setProxyPort(int proxyPort) {
this.proxyPort = proxyPort;
}
/**
- * Returns a base URL consisting of the protocol, host, and port for a
- * Connector in use by the Jetty Server contained in this runner.
+ * Returns a base URL consisting of the protocol, host, and port for a Connector in use by the
+ * Jetty Server contained in this runner.
*/
public URL getBaseUrl() {
try {
@@ -817,17 +844,16 @@ public class JettySolrRunner {
}
}
- public URL getBaseURLV2(){
+ public URL getBaseURLV2() {
try {
return new URL(protocol, host, jettyPort, "/api");
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
-
}
/**
- * Returns a base URL consisting of the protocol, host, and port for a
- * Connector in use by the Jetty Server contained in this runner.
+ * Returns a base URL consisting of the protocol, host, and port for a Connector in use by the
+ * Jetty Server contained in this runner.
*/
public URL getProxyBaseUrl() {
try {
@@ -849,26 +875,21 @@ public class JettySolrRunner {
}
public DebugFilter getDebugFilter() {
- return (DebugFilter)debugFilter.getFilter();
+ return (DebugFilter) debugFilter.getFilter();
}
// --------------------------------------------------------------
// --------------------------------------------------------------
- /**
- * This is a stupid hack to give jetty something to attach to
- */
+ /** This is a stupid hack to give jetty something to attach to */
public static class Servlet404 extends HttpServlet {
@Override
- public void service(HttpServletRequest req, HttpServletResponse res)
- throws IOException {
+ public void service(HttpServletRequest req, HttpServletResponse res) throws IOException {
res.sendError(404, "Can not find: " + req.getRequestURI());
}
}
- /**
- * A main class that starts jetty+solr This is useful for debugging
- */
+ /** A main class that starts jetty+solr This is useful for debugging */
public static void main(String[] args) throws Exception {
JettySolrRunner jetty = new JettySolrRunner(".", "/solr", 8983);
jetty.start();
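The main() just above uses the simplest (solrHome, context, port) constructor; a slightly fuller sketch combines the runner with an explicit JettyConfig, again with a hypothetical solr home path:

import org.apache.solr.client.solrj.embedded.JettyConfig;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;

public class JettySolrRunnerSketch {
  public static void main(String[] args) throws Exception {
    JettyConfig config = JettyConfig.builder().setContext("/solr").setPort(0).build();
    JettySolrRunner jetty = new JettySolrRunner("/path/to/solr/home", config); // hypothetical path
    jetty.start(); // returns once the SolrDispatchFilter is running
    System.out.println("Solr listening on " + jetty.getBaseUrl());
    jetty.stop();
  }
}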
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java
index a74c745..f90ded7 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java
@@ -14,12 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
-/**
+
+/**
* SolrJ client implementations for embedded solr access.
- * <p>
- * See {@link org.apache.solr.client.solrj} for additional details.
+ *
+ * <p>See {@link org.apache.solr.client.solrj} for additional details.
*/
package org.apache.solr.client.solrj.embedded;
-
-
diff --git a/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java b/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java
index 1724b53..2b27f6d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java
@@ -17,9 +17,7 @@
package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
-
import java.util.concurrent.TimeUnit;
-
import org.apache.solr.common.util.TimeSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -27,7 +25,7 @@ import org.slf4j.LoggerFactory;
// this class may be accessed by multiple threads, but only one at a time
public class ActionThrottle {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
+
private volatile Long lastActionStartedAt;
private volatile Long minMsBetweenActions;
@@ -37,18 +35,19 @@ public class ActionThrottle {
public ActionThrottle(String name, long minMsBetweenActions) {
this(name, minMsBetweenActions, TimeSource.NANO_TIME);
}
-
+
public ActionThrottle(String name, long minMsBetweenActions, TimeSource timeSource) {
this.name = name;
this.minMsBetweenActions = minMsBetweenActions;
this.timeSource = timeSource;
}
- public ActionThrottle(String name, long minMsBetweenActions, long lastActionStartedAt) {
+ public ActionThrottle(String name, long minMsBetweenActions, long lastActionStartedAt) {
this(name, minMsBetweenActions, lastActionStartedAt, TimeSource.NANO_TIME);
}
- public ActionThrottle(String name, long minMsBetweenActions, long lastActionStartedAt, TimeSource timeSource) {
+ public ActionThrottle(
+ String name, long minMsBetweenActions, long lastActionStartedAt, TimeSource timeSource) {
this.name = name;
this.minMsBetweenActions = minMsBetweenActions;
this.lastActionStartedAt = lastActionStartedAt;
@@ -62,23 +61,24 @@ public class ActionThrottle {
public void markAttemptingAction() {
lastActionStartedAt = timeSource.getTimeNs();
}
-
+
public void minimumWaitBetweenActions() {
if (lastActionStartedAt == null) {
return;
}
long diff = timeSource.getTimeNs() - lastActionStartedAt;
int diffMs = (int) TimeUnit.MILLISECONDS.convert(diff, TimeUnit.NANOSECONDS);
- long minNsBetweenActions = TimeUnit.NANOSECONDS.convert(minMsBetweenActions, TimeUnit.MILLISECONDS);
+ long minNsBetweenActions =
+ TimeUnit.NANOSECONDS.convert(minMsBetweenActions, TimeUnit.MILLISECONDS);
log.debug("The last {} attempt started {}ms ago.", name, diffMs);
int sleep = 0;
-
+
if (diffMs > 0 && diff < minNsBetweenActions) {
sleep = (int) TimeUnit.MILLISECONDS.convert(minNsBetweenActions - diff, TimeUnit.NANOSECONDS);
} else if (diffMs == 0) {
sleep = minMsBetweenActions.intValue();
}
-
+
if (sleep > 0) {
log.info("Throttling {} attempts - waiting for {}ms", name, sleep);
try {
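The throttle above is used as a pair of calls per attempt: wait first, then mark the attempt. A compact sketch; the name and the 5-second interval are illustrative:

import org.apache.solr.cloud.ActionThrottle;

public class ActionThrottleSketch {
  public static void main(String[] args) {
    ActionThrottle throttle = new ActionThrottle("leader", 5000); // at most one attempt per 5s
    for (int i = 0; i < 3; i++) {
      throttle.minimumWaitBetweenActions(); // sleeps if the previous attempt started < 5s ago
      throttle.markAttemptingAction();      // record the start time of this attempt
      // ... perform the throttled action here ...
    }
  }
}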
diff --git a/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java b/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java
index 9030c38..4fe3963 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java
@@ -20,7 +20,6 @@ import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
-
import org.apache.solr.common.SolrCloseableLatch;
import org.apache.solr.common.cloud.CollectionStateWatcher;
import org.apache.solr.common.cloud.DocCollection;
@@ -31,11 +30,12 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Watch for replicas to become {@link org.apache.solr.common.cloud.Replica.State#ACTIVE}. Watcher is
- * terminated (its {@link #onStateChanged(Set, DocCollection)} method returns false) when all listed
- * replicas become active.
- * <p>Additionally, the provided {@link SolrCloseableLatch} instance can be used to await
- * for all listed replicas to become active.</p>
+ * Watch for replicas to become {@link org.apache.solr.common.cloud.Replica.State#ACTIVE}. Watcher
+ * is terminated (its {@link #onStateChanged(Set, DocCollection)} method returns false) when all
+ * listed replicas become active.
+ *
+ * <p>Additionally, the provided {@link SolrCloseableLatch} instance can be used to await for all
+ * listed replicas to become active.
*/
public class ActiveReplicaWatcher implements CollectionStateWatcher {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -50,13 +50,18 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
/**
* Construct the watcher. At least one replicaId or solrCoreName must be provided.
+ *
* @param collection collection name
* @param replicaIds list of replica id-s
* @param solrCoreNames list of SolrCore names
- * @param latch optional latch to await for all provided replicas to become active. This latch will be
- * counted down by at most the number of provided replica id-s / SolrCore names.
+ * @param latch optional latch to await for all provided replicas to become active. This latch
+ * will be counted down by at most the number of provided replica id-s / SolrCore names.
*/
- public ActiveReplicaWatcher(String collection, List<String> replicaIds, List<String> solrCoreNames, SolrCloseableLatch latch) {
+ public ActiveReplicaWatcher(
+ String collection,
+ List<String> replicaIds,
+ List<String> solrCoreNames,
+ SolrCloseableLatch latch) {
if (replicaIds == null && solrCoreNames == null) {
throw new IllegalArgumentException("Either replicaId or solrCoreName must be provided.");
}
@@ -73,57 +78,63 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
this.latch = latch;
}
- /**
- * Collection name.
- */
+ /** Collection name. */
public String getCollection() {
return collection;
}
- /**
- * Return the list of active replicas found so far.
- */
+ /** Return the list of active replicas found so far. */
public List<Replica> getActiveReplicas() {
return activeReplicas;
}
- /**
- * Return the list of replica id-s that are not active yet (or unverified).
- */
+ /** Return the list of replica id-s that are not active yet (or unverified). */
public List<String> getReplicaIds() {
return replicaIds;
}
- /**
- * Return a list of SolrCore names that are not active yet (or unverified).
- */
+ /** Return a list of SolrCore names that are not active yet (or unverified). */
public List<String> getSolrCoreNames() {
return solrCoreNames;
}
@Override
public String toString() {
- return "ActiveReplicaWatcher@" + Long.toHexString(hashCode()) + "{" +
- "collection='" + collection + '\'' +
- ", replicaIds=" + replicaIds +
- ", solrCoreNames=" + solrCoreNames +
- ", latch=" + (latch != null ? latch.getCount() : "null") + "," +
- ", activeReplicas=" + activeReplicas +
- '}';
+ return "ActiveReplicaWatcher@"
+ + Long.toHexString(hashCode())
+ + "{"
+ + "collection='"
+ + collection
+ + '\''
+ + ", replicaIds="
+ + replicaIds
+ + ", solrCoreNames="
+ + solrCoreNames
+ + ", latch="
+ + (latch != null ? latch.getCount() : "null")
+ + ","
+ + ", activeReplicas="
+ + activeReplicas
+ + '}';
}
// synchronized due to SOLR-11535
@Override
public synchronized boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
if (log.isDebugEnabled()) {
- log.debug("-- onStateChanged@{}: replicaIds={}, solrCoreNames={} {}\ncollectionState {}"
- , Long.toHexString(hashCode()), replicaIds, solrCoreNames
- , (latch != null ? "\nlatch count=" + latch.getCount() : "")
- , collectionState); // nowarn
+ log.debug(
+ "-- onStateChanged@{}: replicaIds={}, solrCoreNames={} {}\ncollectionState {}",
+ Long.toHexString(hashCode()),
+ replicaIds,
+ solrCoreNames,
+ (latch != null ? "\nlatch count=" + latch.getCount() : ""),
+ collectionState); // nowarn
}
if (collectionState == null) { // collection has been deleted - don't wait
if (log.isDebugEnabled()) {
- log.debug("-- collection deleted, decrementing latch by {} ", replicaIds.size() + solrCoreNames.size()); // nowarn
+ log.debug(
+ "-- collection deleted, decrementing latch by {} ",
+ replicaIds.size() + solrCoreNames.size()); // nowarn
}
if (latch != null) {
for (int i = 0; i < replicaIds.size() + solrCoreNames.size(); i++) {
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java b/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
index b00ca52..3fab10c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
@@ -16,19 +16,16 @@
*/
package org.apache.solr.cloud;
+import com.google.common.base.Strings;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
-
-import com.google.common.base.Strings;
import org.apache.solr.common.StringUtils;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.util.PropertiesUtil;
import org.apache.solr.core.CoreDescriptor;
-/**
- * SolrCloud metadata attached to a {@link CoreDescriptor}.
- */
+/** SolrCloud metadata attached to a {@link CoreDescriptor}. */
public class CloudDescriptor {
private final CoreDescriptor cd; // back-reference
@@ -38,35 +35,31 @@ public class CloudDescriptor {
private String roles = null;
private Integer numShards;
private String nodeName = null;
- private Map<String,String> collectionParams = new HashMap<>();
+ private Map<String, String> collectionParams = new HashMap<>();
private volatile boolean isLeader = false;
-
+
// set to true once a core has registered in zk
// set to false on detecting a session expiration
private volatile boolean hasRegistered = false;
private volatile Replica.State lastPublished = Replica.State.ACTIVE;
public static final String NUM_SHARDS = "numShards";
-
+
public static final String REPLICA_TYPE = "replicaType";
-
- /**
- * The type of replica this core hosts
- */
+
+ /** The type of replica this core hosts */
private final Replica.Type replicaType;
public CloudDescriptor(CoreDescriptor cd, String coreName, Properties props) {
this.cd = cd;
this.shardId = props.getProperty(CoreDescriptor.CORE_SHARD, null);
- if (Strings.isNullOrEmpty(shardId))
- this.shardId = null;
+ if (Strings.isNullOrEmpty(shardId)) this.shardId = null;
// If no collection name is specified, we default to the core name
this.collectionName = props.getProperty(CoreDescriptor.CORE_COLLECTION, coreName);
this.roles = props.getProperty(CoreDescriptor.CORE_ROLES, null);
this.nodeName = props.getProperty(CoreDescriptor.CORE_NODE_NAME);
- if (Strings.isNullOrEmpty(nodeName))
- this.nodeName = null;
+ if (Strings.isNullOrEmpty(nodeName)) this.nodeName = null;
this.numShards = PropertiesUtil.toInteger(props.getProperty(CloudDescriptor.NUM_SHARDS), null);
String replicaTypeStr = props.getProperty(CloudDescriptor.REPLICA_TYPE);
if (Strings.isNullOrEmpty(replicaTypeStr)) {
@@ -76,15 +69,17 @@ public class CloudDescriptor {
}
for (String propName : props.stringPropertyNames()) {
if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
- collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), props.getProperty(propName));
+ collectionParams.put(
+ propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()),
+ props.getProperty(propName));
}
}
}
-
+
public boolean requiresTransactionLog() {
return this.replicaType != Replica.Type.PULL;
}
-
+
public Replica.State getLastPublished() {
return lastPublished;
}
@@ -96,15 +91,15 @@ public class CloudDescriptor {
public boolean isLeader() {
return isLeader;
}
-
+
public void setLeader(boolean isLeader) {
this.isLeader = isLeader;
}
-
+
public boolean hasRegistered() {
return hasRegistered;
}
-
+
public void setHasRegistered(boolean hasRegistered) {
this.hasRegistered = hasRegistered;
}
@@ -112,11 +107,11 @@ public class CloudDescriptor {
public void setShardId(String shardId) {
this.shardId = shardId;
}
-
+
public String getShardId() {
return shardId;
}
-
+
public String getCollectionName() {
return collectionName;
}
@@ -125,14 +120,14 @@ public class CloudDescriptor {
this.collectionName = collectionName;
}
- public String getRoles(){
+ public String getRoles() {
return roles;
}
-
- public void setRoles(String roles){
+
+ public void setRoles(String roles) {
this.roles = roles;
}
-
+
/** Optional parameters that can change how a core is created. */
public Map<String, String> getParams() {
return collectionParams;
@@ -142,31 +137,39 @@ public class CloudDescriptor {
public Integer getNumShards() {
return numShards;
}
-
+
public void setNumShards(int numShards) {
this.numShards = numShards;
}
-
+
public String getCoreNodeName() {
return nodeName;
}
public void setCoreNodeName(String nodeName) {
this.nodeName = nodeName;
- if(nodeName==null) cd.getPersistableStandardProperties().remove(CoreDescriptor.CORE_NODE_NAME);
+ if (nodeName == null)
+ cd.getPersistableStandardProperties().remove(CoreDescriptor.CORE_NODE_NAME);
else cd.getPersistableStandardProperties().setProperty(CoreDescriptor.CORE_NODE_NAME, nodeName);
}
public void reload(CloudDescriptor reloadFrom) {
if (reloadFrom == null) return;
- setShardId(StringUtils.isEmpty(reloadFrom.getShardId()) ? getShardId() : reloadFrom.getShardId());
- setCollectionName(StringUtils.isEmpty(reloadFrom.getCollectionName()) ? getCollectionName() : reloadFrom.getCollectionName());
+ setShardId(
+ StringUtils.isEmpty(reloadFrom.getShardId()) ? getShardId() : reloadFrom.getShardId());
+ setCollectionName(
+ StringUtils.isEmpty(reloadFrom.getCollectionName())
+ ? getCollectionName()
+ : reloadFrom.getCollectionName());
setRoles(StringUtils.isEmpty(reloadFrom.getRoles()) ? getRoles() : reloadFrom.getRoles());
if (reloadFrom.getNumShards() != null) {
setNumShards(reloadFrom.getNumShards());
}
- setCoreNodeName(StringUtils.isEmpty(reloadFrom.getCoreNodeName()) ? getCoreNodeName() : reloadFrom.getCoreNodeName());
+ setCoreNodeName(
+ StringUtils.isEmpty(reloadFrom.getCoreNodeName())
+ ? getCoreNodeName()
+ : reloadFrom.getCoreNodeName());
setLeader(reloadFrom.isLeader);
setHasRegistered(reloadFrom.hasRegistered);
setLastPublished(reloadFrom.getLastPublished());
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index b4e5452..7ee6ecc 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -29,7 +29,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
-
import org.apache.commons.io.FileUtils;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.common.SolrException;
@@ -48,15 +47,14 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
public class CloudUtil {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final int DEFAULT_TIMEOUT = 90;
/**
- * See if coreNodeName has been taken over by another baseUrl and unload core
- * + throw exception if it has been.
+ * See if coreNodeName has been taken over by another baseUrl and unload core + throw exception if
+ * it has been.
*/
public static void checkSharedFSFailoverReplaced(CoreContainer cc, CoreDescriptor desc) {
if (!cc.isSharedFs(desc)) return;
@@ -65,12 +63,18 @@ public class CloudUtil {
String thisCnn = zkController.getCoreNodeName(desc);
String thisBaseUrl = zkController.getBaseUrl();
- log.debug("checkSharedFSFailoverReplaced running for coreNodeName={} baseUrl={}", thisCnn, thisBaseUrl);
+ log.debug(
+ "checkSharedFSFailoverReplaced running for coreNodeName={} baseUrl={}",
+ thisCnn,
+ thisBaseUrl);
// if we see our core node name on a different base url, unload
- final DocCollection docCollection = zkController.getClusterState().getCollectionOrNull(desc.getCloudDescriptor().getCollectionName());
+ final DocCollection docCollection =
+ zkController
+ .getClusterState()
+ .getCollectionOrNull(desc.getCloudDescriptor().getCollectionName());
if (docCollection != null && docCollection.getSlicesMap() != null) {
- Map<String,Slice> slicesMap = docCollection.getSlicesMap();
+ Map<String, Slice> slicesMap = docCollection.getSlicesMap();
for (Slice slice : slicesMap.values()) {
for (Replica replica : slice.getReplicas()) {
@@ -78,8 +82,7 @@ public class CloudUtil {
String baseUrl = replica.getBaseUrl();
log.debug("compare against coreNodeName={} baseUrl={}", cnn, baseUrl);
- if (thisCnn != null && thisCnn.equals(cnn)
- && !thisBaseUrl.equals(baseUrl)) {
+ if (thisCnn != null && thisCnn.equals(cnn) && !thisBaseUrl.equals(baseUrl)) {
if (cc.isLoaded(desc.getName())) {
cc.unload(desc.getName());
}
@@ -87,14 +90,24 @@ public class CloudUtil {
try {
FileUtils.deleteDirectory(desc.getInstanceDir().toFile());
} catch (IOException e) {
- SolrException.log(log, "Failed to delete instance dir for core:"
- + desc.getName() + " dir:" + desc.getInstanceDir());
+ SolrException.log(
+ log,
+ "Failed to delete instance dir for core:"
+ + desc.getName()
+ + " dir:"
+ + desc.getInstanceDir());
}
- log.error("{}",
- new SolrException(ErrorCode.SERVER_ERROR, "Will not load SolrCore " + desc.getName()
- + " because it has been replaced due to failover.")); // nowarn
- throw new SolrException(ErrorCode.SERVER_ERROR,
- "Will not load SolrCore " + desc.getName()
+ log.error(
+ "{}",
+ new SolrException(
+ ErrorCode.SERVER_ERROR,
+ "Will not load SolrCore "
+ + desc.getName()
+ + " because it has been replaced due to failover.")); // nowarn
+ throw new SolrException(
+ ErrorCode.SERVER_ERROR,
+ "Will not load SolrCore "
+ + desc.getName()
+ " because it has been replaced due to failover.");
}
}
@@ -102,7 +115,8 @@ public class CloudUtil {
}
}
- public static boolean replicaExists(ClusterState clusterState, String collection, String shard, String coreNodeName) {
+ public static boolean replicaExists(
+ ClusterState clusterState, String collection, String shard, String coreNodeName) {
DocCollection docCollection = clusterState.getCollectionOrNull(collection);
if (docCollection != null) {
Slice slice = docCollection.getSlice(shard);
@@ -116,84 +130,97 @@ public class CloudUtil {
/**
* Returns a displayable unified path to the given resource. For non-solrCloud that will be the
* same as getConfigDir, but for Cloud it will be getConfigSetZkPath ending in a /
- * <p>
- * <b>Note:</b> Do not use this to generate a valid file path, but for debug printing etc
+ *
+ * <p><b>Note:</b> Do not use this to generate a valid file path, but for debug printing etc
+ *
* @param loader Resource loader instance
* @return a String of path to resource
*/
public static String unifiedResourcePath(SolrResourceLoader loader) {
- return (loader instanceof ZkSolrResourceLoader) ?
- ((ZkSolrResourceLoader) loader).getConfigSetZkPath() + "/" :
- loader.getConfigPath() + "/";
+ return (loader instanceof ZkSolrResourceLoader)
+ ? ((ZkSolrResourceLoader) loader).getConfigSetZkPath() + "/"
+ : loader.getConfigPath() + "/";
}
- /**Read the list of public keys from ZK
- */
-
+ /** Read the list of public keys from ZK */
public static Map<String, byte[]> getTrustedKeys(SolrZkClient zk, String dir) {
Map<String, byte[]> result = new HashMap<>();
try {
List<String> children = zk.getChildren("/keys/" + dir, null, true);
for (String key : children) {
- if (key.endsWith(".der")) result.put(key, zk.getData("/keys/" + dir +
- "/" + key, null, null, true));
+ if (key.endsWith(".der"))
+ result.put(key, zk.getData("/keys/" + dir + "/" + key, null, null, true));
}
} catch (KeeperException.NoNodeException e) {
log.info("Error fetching key names");
return Collections.emptyMap();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- throw new SolrException(ErrorCode.SERVER_ERROR,"Unable to read crypto keys",e );
+ throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to read crypto keys", e);
} catch (KeeperException e) {
- throw new SolrException(ErrorCode.SERVER_ERROR,"Unable to read crypto keys",e );
+ throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to read crypto keys", e);
}
return result;
-
}
/**
* Wait for a particular collection state to appear.
*
- * This is a convenience method using the {@link #DEFAULT_TIMEOUT}
+ * <p>This is a convenience method using the {@link #DEFAULT_TIMEOUT}
*
* @param cloudManager current instance of {@link SolrCloudManager}
- * @param message a message to report on failure
- * @param collection the collection to watch
- * @param predicate a predicate to match against the collection state
+ * @param message a message to report on failure
+ * @param collection the collection to watch
+ * @param predicate a predicate to match against the collection state
*/
- public static long waitForState(final SolrCloudManager cloudManager,
- final String message,
- final String collection,
- final CollectionStatePredicate predicate) {
+ public static long waitForState(
+ final SolrCloudManager cloudManager,
+ final String message,
+ final String collection,
+ final CollectionStatePredicate predicate) {
AtomicReference<DocCollection> state = new AtomicReference<>();
AtomicReference<Set<String>> liveNodesLastSeen = new AtomicReference<>();
try {
- return waitForState(cloudManager, collection, DEFAULT_TIMEOUT, TimeUnit.SECONDS, (n, c) -> {
- state.set(c);
- liveNodesLastSeen.set(n);
- return predicate.matches(n, c);
- });
+ return waitForState(
+ cloudManager,
+ collection,
+ DEFAULT_TIMEOUT,
+ TimeUnit.SECONDS,
+ (n, c) -> {
+ state.set(c);
+ liveNodesLastSeen.set(n);
+ return predicate.matches(n, c);
+ });
} catch (Exception e) {
- throw new AssertionError(message + "\n" + "Live Nodes: " + liveNodesLastSeen.get() + "\nLast available state: " + state.get(), e);
+ throw new AssertionError(
+ message
+ + "\n"
+ + "Live Nodes: "
+ + liveNodesLastSeen.get()
+ + "\nLast available state: "
+ + state.get(),
+ e);
}
}
/**
* Wait for a particular collection state to appear.
*
- * This is a convenience method using the {@link #DEFAULT_TIMEOUT}
+ * <p>This is a convenience method using the {@link #DEFAULT_TIMEOUT}
*
* @param cloudManager current instance of {@link SolrCloudManager}
- * @param collection the collection to watch
+ * @param collection the collection to watch
* @param wait timeout value
* @param unit timeout unit
- * @param predicate a predicate to match against the collection state
+ * @param predicate a predicate to match against the collection state
*/
- public static long waitForState(final SolrCloudManager cloudManager,
- final String collection,
- long wait,
- final TimeUnit unit,
- final CollectionStatePredicate predicate) throws InterruptedException, TimeoutException, IOException {
+ public static long waitForState(
+ final SolrCloudManager cloudManager,
+ final String collection,
+ long wait,
+ final TimeUnit unit,
+ final CollectionStatePredicate predicate)
+ throws InterruptedException, TimeoutException, IOException {
TimeOut timeout = new TimeOut(wait, unit, cloudManager.getTimeSource());
long timeWarn = timeout.timeLeft(TimeUnit.MILLISECONDS) / 4;
ClusterState state = null;
@@ -222,6 +249,7 @@ public class CloudUtil {
/**
* Return a {@link CollectionStatePredicate} that returns true if a collection has the expected
* number of active shards and replicas
+ *
* @param expectedShards expected number of active shards
* @param expectedReplicas expected number of active replicas
*/
@@ -232,30 +260,41 @@ public class CloudUtil {
/**
* Return a {@link CollectionStatePredicate} that returns true if a collection has the expected
* number of shards and replicas.
- * <p>Note: for shards marked as inactive the current Solr behavior is that replicas remain active.
+ *
+ * <p>Note: for shards marked as inactive the current Solr behavior is that replicas remain
+ * active.
+ *
* @param expectedShards expected number of shards
* @param expectedReplicas expected number of active replicas per shard
* @param withInactive if true then count also inactive shards
* @param requireLeaders if true then require that each shard has a leader
*/
- public static CollectionStatePredicate clusterShape(int expectedShards, int expectedReplicas, boolean withInactive,
- boolean requireLeaders) {
+ public static CollectionStatePredicate clusterShape(
+ int expectedShards, int expectedReplicas, boolean withInactive, boolean requireLeaders) {
return (liveNodes, collectionState) -> {
if (collectionState == null) {
log.debug("-- null collection");
return false;
}
- Collection<Slice> slices = withInactive ? collectionState.getSlices() : collectionState.getActiveSlices();
+ Collection<Slice> slices =
+ withInactive ? collectionState.getSlices() : collectionState.getActiveSlices();
if (slices.size() != expectedShards) {
if (log.isDebugEnabled()) {
- log.debug("-- wrong number of slices for collection {}, expected={}, found={}: {}", collectionState.getName(), expectedShards, collectionState.getSlices().size(), collectionState.getSlices());
+ log.debug(
+ "-- wrong number of slices for collection {}, expected={}, found={}: {}",
+ collectionState.getName(),
+ expectedShards,
+ collectionState.getSlices().size(),
+ collectionState.getSlices());
}
return false;
}
Set<String> leaderless = new HashSet<>();
for (Slice slice : slices) {
int activeReplicas = 0;
- if (requireLeaders && slice.getState() != Slice.State.INACTIVE && slice.getLeader() == null) {
+ if (requireLeaders
+ && slice.getState() != Slice.State.INACTIVE
+ && slice.getLeader() == null) {
leaderless.add(slice.getName());
continue;
}
@@ -264,12 +303,16 @@ public class CloudUtil {
continue;
}
for (Replica replica : slice) {
- if (replica.isActive(liveNodes))
- activeReplicas++;
+ if (replica.isActive(liveNodes)) activeReplicas++;
}
if (activeReplicas != expectedReplicas) {
if (log.isDebugEnabled()) {
- log.debug("-- wrong number of active replicas for collection {} in slice {}, expected={}, found={}", collectionState.getName(), slice.getName(), expectedReplicas, activeReplicas);
+ log.debug(
+ "-- wrong number of active replicas for collection {} in slice {}, expected={}, found={}",
+ collectionState.getName(),
+ slice.getName(),
+ expectedReplicas,
+ activeReplicas);
}
return false;
}
@@ -284,7 +327,8 @@ public class CloudUtil {
}
/**
- * Builds a string with sorted {@link CoreContainer#getLoadedCoreNames()} while truncating to the first 20 cores.
+ * Builds a string with sorted {@link CoreContainer#getLoadedCoreNames()} while truncating to the
+ * first 20 cores.
*/
static String getLoadedCoreNamesAsString(CoreContainer coreContainer) {
List<String> loadedCoreNames = coreContainer.getLoadedCoreNames();
@@ -292,6 +336,8 @@ public class CloudUtil {
loadedCoreNames.sort(null);
}
return loadedCoreNames.stream().limit(20).collect(Collectors.toList())
- + (loadedCoreNames.size() > 20 ? "...(truncated from " + loadedCoreNames.size() + " cores)" : "");
+ + (loadedCoreNames.size() > 20
+ ? "...(truncated from " + loadedCoreNames.size() + " cores)"
+ : "");
}
}
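Tying the pieces of this class together, a hedged sketch of waiting for a specific cluster shape with an explicit timeout; collection name, shape and the wrapping helper method are illustrative only:

static long waitForTwoByThree(SolrCloudManager cloudManager)
    throws InterruptedException, TimeoutException, IOException {
  // Wait up to 90 seconds for 2 shards with 3 active replicas each, requiring a leader per shard.
  return CloudUtil.waitForState(
      cloudManager, "techproducts", 90, TimeUnit.SECONDS,
      CloudUtil.clusterShape(2, 3, false, true));
}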
diff --git a/solr/core/src/java/org/apache/solr/cloud/ClusterSingleton.java b/solr/core/src/java/org/apache/solr/cloud/ClusterSingleton.java
index 95357e2..8cbe939 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ClusterSingleton.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ClusterSingleton.java
@@ -18,20 +18,23 @@ package org.apache.solr.cloud;
/**
* Intended for components that should be enabled with only one instance per cluster.
+ *
* <p>Components that implement this interface are always in one of these states:
+ *
* <ul>
- * <li>STOPPED - the default state. The component is idle and does not perform
- * any functions. It should also avoid holding any resources.</li>
- * <li>STARTING - transitional state, which leads either to RUNNING or STOPPING in
- * case of startup failures.</li>
- * <li>STOPPING - transitional state, which leads to STOPPED state.</li>
- * <li>RUNNING - the component is active.</li>
+ * <li>STOPPED - the default state. The component is idle and does not perform any functions. It
+ * should also avoid holding any resources.
+ * <li>STARTING - transitional state, which leads either to RUNNING or STOPPING in case of startup
+ * failures.
+ * <li>STOPPING - transitional state, which leads to STOPPED state.
+ * <li>RUNNING - the component is active.
* </ul>
- * <p>Components must be prepared to change these states multiple times in their
- * life-cycle.</p>
- * <p>Implementation detail: currently these components are instantiated on all nodes
- * but they are started only on the Overseer leader, and stopped when the current
- * node loses its Overseer leadership.</p>
+ *
+ * <p>Components must be prepared to change these states multiple times in their life-cycle.
+ *
+ * <p>Implementation detail: currently these components are instantiated on all nodes but they are
+ * started only on the Overseer leader, and stopped when the current node loses its Overseer
+ * leadership.
*/
public interface ClusterSingleton {
@@ -46,28 +49,24 @@ public interface ClusterSingleton {
STOPPING
}
- /**
- * Unique name of this singleton. Used for registration.
- */
+ /** Unique name of this singleton. Used for registration. */
String getName();
/**
- * Start the operation of the component. Initially this method should set
- * the state to STARTING, and on success it should set the state to RUNNING.
- * @throws Exception on startup errors. The component should revert to the
- * STOPPED state.
+ * Start the operation of the component. Initially this method should set the state to STARTING,
+ * and on success it should set the state to RUNNING.
+ *
+ * @throws Exception on startup errors. The component should revert to the STOPPED state.
*/
void start() throws Exception;
- /**
- * Returns the current state of the component.
- */
+ /** Returns the current state of the component. */
State getState();
/**
- * Stop the operation of the component. Initially this method should set
- * the state to STOPPING, and on return it should set the state to STOPPED.
- * Components should also avoid holding any resource when in STOPPED state.
+ * Stop the operation of the component. Initially this method should set the state to STOPPING,
+ * and on return it should set the state to STOPPED. Components should also avoid holding any
+ * resource when in STOPPED state.
*/
void stop();
}
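To make the contract concrete, a hedged sketch of an implementation that follows the state transitions described above; the class name and body are illustrative and assume the same package as the interface:

public class MySingletonComponent implements ClusterSingleton {
  private volatile State state = State.STOPPED;

  @Override
  public String getName() {
    return "my-singleton";
  }

  @Override
  public void start() throws Exception {
    state = State.STARTING;
    try {
      // acquire resources, schedule background work, etc.
      state = State.RUNNING;
    } catch (Exception e) {
      state = State.STOPPED; // revert on startup errors, as the contract requires
      throw e;
    }
  }

  @Override
  public State getState() {
    return state;
  }

  @Override
  public void stop() {
    state = State.STOPPING;
    // release any held resources before reporting STOPPED
    state = State.STOPPED;
  }
}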
diff --git a/solr/core/src/java/org/apache/solr/cloud/ConfigSetApiLockFactory.java b/solr/core/src/java/org/apache/solr/cloud/ConfigSetApiLockFactory.java
index 7200969..75dab9e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ConfigSetApiLockFactory.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ConfigSetApiLockFactory.java
@@ -16,11 +16,13 @@
*/
package org.apache.solr.cloud;
+
import java.util.ArrayList;
import java.util.List;
/**
- * This class implements a higher level locking abstraction for the Config Set API using lower level read and write locks.
+ * This class implements a higher level locking abstraction for the Config Set API using lower level
+ * read and write locks.
*/
public class ConfigSetApiLockFactory {
@@ -31,17 +33,19 @@ public class ConfigSetApiLockFactory {
}
/**
- * For the {@link org.apache.solr.common.params.CollectionParams.LockLevel} of the passed {@code action}, obtains the
- * required locks (if any) and returns.<p>
+ * For the {@link org.apache.solr.common.params.CollectionParams.LockLevel} of the passed {@code
+ * action}, obtains the required locks (if any) and returns.
*
- * This method obtains a write lock on {@code configSetName} as well as (when not {@code null}), a read lock on {@code baseConfigSetName}.
+ * <p>This method obtains a write lock on {@code configSetName} as well as (when not {@code
+ * null}), a read lock on {@code baseConfigSetName}.
*
- * @return a lock that once {@link DistributedMultiLock#isAcquired()} guarantees the corresponding Config Set API command
- * can execute safely.
- * The returned lock <b>MUST</b> be {@link DistributedMultiLock#release()} no matter what once no longer needed as otherwise it would
- * prevent other threads from locking.
+ * @return a lock that once {@link DistributedMultiLock#isAcquired()} guarantees the corresponding
+ * Config Set API command can execute safely. The returned lock <b>MUST</b> be {@link
+ * DistributedMultiLock#release()} no matter what once no longer needed as otherwise it would
+ * prevent other threads from locking.
*/
- public DistributedMultiLock createConfigSetApiLock(String configSetName, String baseConfigSetName) {
+ public DistributedMultiLock createConfigSetApiLock(
+ String configSetName, String baseConfigSetName) {
List<DistributedLock> locks = new ArrayList<>(2);
locks.add(lockFactory.createLock(true, configSetName));
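A hedged sketch of the acquire/release discipline the Javadoc above insists on; the config set names and the simple polling strategy are illustrative, not prescribed by this class:

void runWithConfigSetLock(ConfigSetApiLockFactory lockFactory) throws InterruptedException {
  DistributedMultiLock lock = lockFactory.createConfigSetApiLock("myConfig", "sharedBase");
  try {
    while (!lock.isAcquired()) {
      Thread.sleep(50); // simple polling; real callers may use a different wait strategy
    }
    // ... execute the Config Set API command safely here ...
  } finally {
    lock.release(); // MUST always release, otherwise other threads stay blocked
  }
}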
diff --git a/solr/core/src/java/org/apache/solr/cloud/ConfigSetCmds.java b/solr/core/src/java/org/apache/solr/cloud/ConfigSetCmds.java
index 0dc9b26..e3133a4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ConfigSetCmds.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ConfigSetCmds.java
@@ -17,6 +17,12 @@
package org.apache.solr.cloud;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.CREATE;
+import static org.apache.solr.common.util.Utils.toJSONString;
+import static org.apache.solr.handler.admin.ConfigSetsHandler.DEFAULT_CONFIGSET_NAME;
+
+import com.jayway.jsonpath.internal.Utils;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
@@ -24,8 +30,6 @@ import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
-
-import com.jayway.jsonpath.internal.Utils;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -38,42 +42,36 @@ import org.apache.solr.core.CoreContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.CREATE;
-import static org.apache.solr.common.util.Utils.toJSONString;
-import static org.apache.solr.handler.admin.ConfigSetsHandler.DEFAULT_CONFIGSET_NAME;
-
/**
- * This class contains methods dealing with Config Sets and called for Config Set API execution, called
- * from the {@link OverseerConfigSetMessageHandler} or from
- * {@link org.apache.solr.cloud.api.collections.DistributedCollectionConfigSetCommandRunner#runConfigSetCommand} depending
- * on whether Collection and Config Set APIs are Overseer based or distributed.
+ * This class contains methods dealing with Config Sets and called for Config Set API execution,
+ * called from the {@link OverseerConfigSetMessageHandler} or from {@link
+ * org.apache.solr.cloud.api.collections.DistributedCollectionConfigSetCommandRunner#runConfigSetCommand}
+ * depending on whether Collection and Config Set APIs are Overseer based or distributed.
*/
public class ConfigSetCmds {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- /**
- * Name of the ConfigSet to copy from for CREATE
- */
+ /** Name of the ConfigSet to copy from for CREATE */
public static final String BASE_CONFIGSET = "baseConfigSet";
- /**
- * Prefix for properties that should be applied to the ConfigSet for CREATE
- */
+ /** Prefix for properties that should be applied to the ConfigSet for CREATE */
public static final String CONFIG_SET_PROPERTY_PREFIX = "configSetProp.";
- public static String getBaseConfigSetName(ConfigSetParams.ConfigSetAction action, String baseConfigSetName) {
+ public static String getBaseConfigSetName(
+ ConfigSetParams.ConfigSetAction action, String baseConfigSetName) {
if (action == CREATE) {
return Utils.isEmpty(baseConfigSetName) ? DEFAULT_CONFIGSET_NAME : baseConfigSetName;
}
return null;
}
-
- private static NamedList<Object> getConfigSetProperties(ConfigSetService configSetService, String configName, String propertyPath) throws IOException {
+ private static NamedList<Object> getConfigSetProperties(
+ ConfigSetService configSetService, String configName, String propertyPath)
+ throws IOException {
byte[] oldPropsData = configSetService.downloadFileFromConfig(configName, propertyPath);
if (oldPropsData != null) {
- InputStreamReader reader = new InputStreamReader(new ByteArrayInputStream(oldPropsData), StandardCharsets.UTF_8);
+ InputStreamReader reader =
+ new InputStreamReader(new ByteArrayInputStream(oldPropsData), StandardCharsets.UTF_8);
try {
return ConfigSetProperties.readFromInputStream(reader);
} finally {
@@ -90,8 +88,8 @@ public class ConfigSetCmds {
if (properties == null) {
properties = new HashMap<String, Object>();
}
- properties.put(entry.getKey().substring((CONFIG_SET_PROPERTY_PREFIX).length()),
- entry.getValue());
+ properties.put(
+ entry.getKey().substring((CONFIG_SET_PROPERTY_PREFIX).length()), entry.getValue());
}
}
return properties;
@@ -109,14 +107,16 @@ public class ConfigSetCmds {
if (newProps != null) {
String propertyDataStr = toJSONString(newProps);
if (propertyDataStr == null) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid property specification");
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST, "Invalid property specification");
}
return propertyDataStr.getBytes(StandardCharsets.UTF_8);
}
return null;
}
- public static void createConfigSet(ZkNodeProps message, CoreContainer coreContainer) throws IOException {
+ public static void createConfigSet(ZkNodeProps message, CoreContainer coreContainer)
+ throws IOException {
String configSetName = message.getStr(NAME);
if (configSetName == null || configSetName.length() == 0) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "ConfigSet name not specified");
@@ -125,12 +125,14 @@ public class ConfigSetCmds {
String baseConfigSetName = message.getStr(BASE_CONFIGSET, DEFAULT_CONFIGSET_NAME);
if (coreContainer.getConfigSetService().checkConfigExists(configSetName)) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "ConfigSet already exists: " + configSetName);
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST, "ConfigSet already exists: " + configSetName);
}
// is there a base config that already exists
if (!coreContainer.getConfigSetService().checkConfigExists(baseConfigSetName)) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
"Base ConfigSet does not exist: " + baseConfigSetName);
}
@@ -138,7 +140,9 @@ public class ConfigSetCmds {
Map<String, Object> props = getNewProperties(message);
if (props != null) {
// read the old config properties and do a merge, if necessary
- NamedList<Object> oldProps = getConfigSetProperties(coreContainer.getConfigSetService(), baseConfigSetName, propertyPath);
+ NamedList<Object> oldProps =
+ getConfigSetProperties(
+ coreContainer.getConfigSetService(), baseConfigSetName, propertyPath);
if (oldProps != null) {
mergeOldProperties(props, oldProps);
}
@@ -148,7 +152,9 @@ public class ConfigSetCmds {
try {
coreContainer.getConfigSetService().copyConfig(baseConfigSetName, configSetName);
if (propertyData != null) {
- coreContainer.getConfigSetService().uploadFileToConfig(configSetName, propertyPath, propertyData, true);
+ coreContainer
+ .getConfigSetService()
+ .uploadFileToConfig(configSetName, propertyPath, propertyData, true);
}
} catch (Exception e) {
// copying the config dir or writing the properties file may have failed.
@@ -167,7 +173,8 @@ public class ConfigSetCmds {
}
}
- public static void deleteConfigSet(ZkNodeProps message, CoreContainer coreContainer) throws IOException {
+ public static void deleteConfigSet(ZkNodeProps message, CoreContainer coreContainer)
+ throws IOException {
String configSetName = message.getStr(NAME);
if (configSetName == null || configSetName.length() == 0) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "ConfigSet name not specified");
@@ -176,23 +183,32 @@ public class ConfigSetCmds {
deleteConfigSet(configSetName, false, coreContainer);
}
- private static void deleteConfigSet(String configSetName, boolean force, CoreContainer coreContainer) throws IOException {
+ private static void deleteConfigSet(
+ String configSetName, boolean force, CoreContainer coreContainer) throws IOException {
ZkStateReader zkStateReader = coreContainer.getZkController().getZkStateReader();
- for (Map.Entry<String, DocCollection> entry : zkStateReader.getClusterState().getCollectionsMap().entrySet()) {
+ for (Map.Entry<String, DocCollection> entry :
+ zkStateReader.getClusterState().getCollectionsMap().entrySet()) {
String configName = entry.getValue().getConfigName();
if (configSetName.equals(configName))
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "Can not delete ConfigSet as it is currently being used by collection [" + entry.getKey() + "]");
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Can not delete ConfigSet as it is currently being used by collection ["
+ + entry.getKey()
+ + "]");
}
String propertyPath = ConfigSetProperties.DEFAULT_FILENAME;
- NamedList<Object> properties = getConfigSetProperties(coreContainer.getConfigSetService(), configSetName, propertyPath);
+ NamedList<Object> properties =
+ getConfigSetProperties(coreContainer.getConfigSetService(), configSetName, propertyPath);
if (properties != null) {
Object immutable = properties.get(ConfigSetProperties.IMMUTABLE_CONFIGSET_ARG);
- boolean isImmutableConfigSet = immutable != null ? Boolean.parseBoolean(immutable.toString()) : false;
+ boolean isImmutableConfigSet =
+ immutable != null ? Boolean.parseBoolean(immutable.toString()) : false;
if (!force && isImmutableConfigSet) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Requested delete of immutable ConfigSet: " + configSetName);
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Requested delete of immutable ConfigSet: " + configSetName);
}
}
coreContainer.getConfigSetService().deleteConfig(configSetName);
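A hedged sketch of assembling the message that createConfigSet consumes, using only the keys visible in this class; the config set names are hypothetical and coreContainer is assumed to be available:

Map<String, Object> props = new HashMap<>();
props.put(CommonParams.NAME, "myConfig");              // name of the config set to create
props.put(ConfigSetCmds.BASE_CONFIGSET, "sharedBase"); // existing config set to copy from
props.put(ConfigSetCmds.CONFIG_SET_PROPERTY_PREFIX + "immutable", "false"); // stored in the config set's properties file
ConfigSetCmds.createConfigSet(new ZkNodeProps(props), coreContainer); // throws SolrException / IOException on invalid input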
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedApiAsyncTracker.java b/solr/core/src/java/org/apache/solr/cloud/DistributedApiAsyncTracker.java
index 8cf4f8d..8ebdb2a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedApiAsyncTracker.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedApiAsyncTracker.java
@@ -17,11 +17,12 @@
package org.apache.solr.cloud;
+import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
+
+import com.google.common.annotations.VisibleForTesting;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
-
-import com.google.common.annotations.VisibleForTesting;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.cloud.api.collections.DistributedCollectionConfigSetCommandRunner;
import org.apache.solr.common.SolrException;
@@ -34,21 +35,21 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
-
/**
- * Class holding the implementation required for tracking asynchronous Collection API (or other) tasks when the Collection
- * API is distributed.<p>
+ * Class holding the implementation required for tracking asynchronous Collection API (or other)
+ * tasks when the Collection API is distributed.
*
- * This replaces the features provided by the distributed maps on ZK paths /overseer/collection-map-completed,
- * /overseer/collection-map-failure and /overseer/async_ids when the Collection API commands are handled by the Overseer.<p>
+ * <p>This replaces the features provided by the distributed maps on ZK paths
+ * /overseer/collection-map-completed, /overseer/collection-map-failure and /overseer/async_ids when
+ * the Collection API commands are handled by the Overseer.
*
- * It works by using two Zookeeper directories, one for persistent nodes for each new async id and one for ephemeral nodes
- * for each async id currently being processed (in flight).<br>
+ * <p>It works by using two Zookeeper directories, one for persistent nodes for each new async id
+ * and one for ephemeral nodes for each async id currently being processed (in flight).<br>
* A persistent async node has either no data, or has a serialized OverseerSolrResponse as content.
- * An ephemeral async node has two possible states (content): 'S' or 'R'.<p>
+ * An ephemeral async node has two possible states (content): 'S' or 'R'.
+ *
+ * <p>The actual state of an async task is built from a combination of the two nodes:
*
- * The actual state of an async task is built from a combination of the two nodes:
* <pre>
* +===================+=========================================+=================================================+====================+
* | | persistent=success OverseerSolrResponse | persistent=null or failed OverseerSolrResponse | No persistent node |
@@ -65,8 +66,9 @@ public class DistributedApiAsyncTracker {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
/**
- * Corresponds to Overseer.NUM_RESPONSES_TO_STORE. The size of the persistent store of async ID's put in Zookeeper.
- * This is the max total tracked async request ID's over all nodes running in the distributed Collection API.
+ * Corresponds to Overseer.NUM_RESPONSES_TO_STORE. The size of the persistent store of async ID's
+ * put in Zookeeper. This is the max total tracked async request ID's over all nodes running in
+ * the distributed Collection API.
*/
public static int MAX_TRACKED_ASYNC_TASKS = 10000;
@@ -77,8 +79,8 @@ public class DistributedApiAsyncTracker {
private final String inFlightIdsPath;
/**
- * Persistent storage in Zookeeper under path {@link #ZK_ASYNC_PERSISTENT} of all currently known (in flight, completed
- * with success or error) async request id's.
+ * Persistent storage in Zookeeper under path {@link #ZK_ASYNC_PERSISTENT} of all currently known
+ * (in flight, completed with success or error) async request id's.
*/
private final SizeLimitedDistributedMap trackedAsyncTasks;
@@ -93,48 +95,55 @@ public class DistributedApiAsyncTracker {
persistentIdsPath = rootPath + ZK_ASYNC_PERSISTENT;
inFlightIdsPath = rootPath + ZK_ASYNC_INFLIGHT;
- trackedAsyncTasks = new SizeLimitedDistributedMap(zkClient, persistentIdsPath, maxTrackedTasks, null);
+ trackedAsyncTasks =
+ new SizeLimitedDistributedMap(zkClient, persistentIdsPath, maxTrackedTasks, null);
inFlightAsyncTasks = new InFlightJobs(zkClient, inFlightIdsPath);
}
/**
- * After a successful call to this method, caller MUST eventually call {@link #setTaskCompleted} or {@link #cancelAsyncId}
- * otherwise the task will forever be considered as in progress.
+ * After a successful call to this method, caller MUST eventually call {@link #setTaskCompleted}
+ * or {@link #cancelAsyncId} otherwise the task will forever be considered as in progress.
+ *
* @param asyncId if {@code null} this method will do nothing.
- * @return {@code true} if the asyncId was not already in use (or is {@code null}) and {@code false} if it is already
- * in use and can't be allocated again.
+ * @return {@code true} if the asyncId was not already in use (or is {@code null}) and {@code
+ * false} if it is already in use and can't be allocated again.
*/
public boolean createNewAsyncJobTracker(String asyncId) {
if (asyncId == null) {
return true;
}
try {
- // First create the persistent node, with no content. If that fails, it means the asyncId has been previously used
- // and not yet cleared...
+ // First create the persistent node, with no content. If that fails, it means the asyncId has
+ // been previously used and not yet cleared...
if (!trackedAsyncTasks.putIfAbsent(asyncId, null)) {
return false;
}
- // ...then create the transient node. If the corresponding ephemeral node already exists, it means the persistent node
- // was removed (maybe trackedAsyncTasks grew too large? It has a max size then evicts). We cannot then track the new
- // provided asyncId, and have simply "revived" its persistent node...
+ // ...then create the transient node. If the corresponding ephemeral node already exists, it
+ // means the persistent node was removed (maybe trackedAsyncTasks grew too large? It has a max
+ // size then evicts). We cannot then track the new provided asyncId, and have simply "revived"
+ // its persistent node...
try {
inFlightAsyncTasks.createNewInFlightTask(asyncId);
return true;
} catch (KeeperException.NodeExistsException nee) {
- log.warn("Async id {} was not found in trackedAsyncTasks but was still present in inFlightAsyncTasks", asyncId);
+ log.warn(
+ "Async id {} was not found in trackedAsyncTasks but was still present in inFlightAsyncTasks",
+ asyncId);
return false;
}
} catch (KeeperException ke) {
throw new SolrException(SERVER_ERROR, "Error creating new async job tracking " + asyncId, ke);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
- throw new SolrException(SERVER_ERROR, "Interrupted creating new async job tracking " + asyncId, ie);
+ throw new SolrException(
+ SERVER_ERROR, "Interrupted creating new async job tracking " + asyncId, ie);
}
}
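A hedged sketch of the full tracking sequence the Javadoc above demands; executeCommand() is a hypothetical stand-in for the actual Collection API work:

boolean tracked = tracker.createNewAsyncJobTracker(asyncId);
if (!tracked) {
  // The asyncId is already in use (or cannot be tracked): reject the request.
  throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "async id already in use: " + asyncId);
}
tracker.setTaskRunning(asyncId);
OverseerSolrResponse response;
try {
  response = executeCommand(); // hypothetical: runs the actual Collection API command
} catch (RuntimeException e) {
  tracker.cancelAsyncId(asyncId); // the command could not be executed at all
  throw e;
}
tracker.setTaskCompleted(asyncId, response); // success or failure is encoded in the response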
/**
- * Initially an async task is submitted. Just before it actually starts execution it is set to running.
+ * Initially an async task is submitted. Just before it actually starts execution it is set to
+ * running.
*/
public void setTaskRunning(String asyncId) {
if (asyncId == null) {
@@ -146,20 +155,21 @@ public class DistributedApiAsyncTracker {
throw new SolrException(SERVER_ERROR, "Error setting async task as running " + asyncId, ke);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
- throw new SolrException(SERVER_ERROR, "Interrupted setting async task as running " + asyncId, ie);
+ throw new SolrException(
+ SERVER_ERROR, "Interrupted setting async task as running " + asyncId, ie);
}
}
/**
- * Mark the completion (success or error) of an async task. The success or error is judged by the contents
- * of the {@link OverseerSolrResponse}.
+ * Mark the completion (success or error) of an async task. The success or error is judged by the
+ * contents of the {@link OverseerSolrResponse}.
*/
public void setTaskCompleted(String asyncId, OverseerSolrResponse solrResponse) {
if (asyncId == null) {
return;
}
- // First update the persistent node with the execution result, only then remove the transient node
- // (otherwise a status check might report the task in error)
+ // First update the persistent node with the execution result, only then remove the transient
+ // node (otherwise a status check might report the task in error)
try {
try {
trackedAsyncTasks.put(asyncId, OverseerSolrResponseSerializer.serialize(solrResponse));
@@ -170,13 +180,12 @@ public class DistributedApiAsyncTracker {
throw new SolrException(SERVER_ERROR, "Error setting async task as completed " + asyncId, ke);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
- throw new SolrException(SERVER_ERROR, "Interrupted setting async task as completed " + asyncId, ie);
+ throw new SolrException(
+ SERVER_ERROR, "Interrupted setting async task as completed " + asyncId, ie);
}
}
- /**
- * Cancels the tracking of an asyncId, if the corresponding command could not be executed.
- */
+ /** Cancels the tracking of an asyncId, if the corresponding command could not be executed. */
public void cancelAsyncId(String asyncId) {
if (asyncId == null) {
return;
@@ -196,25 +205,31 @@ public class DistributedApiAsyncTracker {
}
/**
- * This method implements the logic described in the class Javadoc table ({@link DistributedApiAsyncTracker}), using the
- * two sets of tracking info to build the actual state of an async task.<p>
+ * This method implements the logic described in the class Javadoc table ({@link
+ * DistributedApiAsyncTracker}), using the two sets of tracking info to build the actual state of
+ * an async task.
*
- * Returns the status of an async task, and when relevant the corresponding response from the command execution. The
- * returned {@link OverseerSolrResponse} will not be {@code null} when the returned
- * {@link RequestStatusState} is {@link RequestStatusState#COMPLETED} or {@link RequestStatusState#FAILED} (and will be
- * {@code null} in all other cases).
+ * <p>Returns the status of an async task, and when relevant the corresponding response from the
+ * command execution. The returned {@link OverseerSolrResponse} will not be {@code null} when the
+ * returned {@link RequestStatusState} is {@link RequestStatusState#COMPLETED} or {@link
+ * RequestStatusState#FAILED} (and will be {@code null} in all other cases).
*/
- public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId) throws Exception {
+ public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId)
+ throws Exception {
if (asyncId == null || !trackedAsyncTasks.contains(asyncId)) {
// This return addresses the whole "No persistent node" column from the table
return new Pair<>(RequestStatusState.NOT_FOUND, null);
}
byte[] data = trackedAsyncTasks.get(asyncId);
- OverseerSolrResponse response = data != null ? OverseerSolrResponseSerializer.deserialize(data) : null;
-
- if (response != null && response.getResponse().get("failure") == null && response.getResponse().get("exception") == null) {
- // This return addresses the whole "persistent=success OverseerSolrResponse" column from the table
+ OverseerSolrResponse response =
+ data != null ? OverseerSolrResponseSerializer.deserialize(data) : null;
+
+ if (response != null
+ && response.getResponse().get("failure") == null
+ && response.getResponse().get("exception") == null) {
+ // This return addresses the whole "persistent=success OverseerSolrResponse" column from the
+ // table
return new Pair<>(RequestStatusState.COMPLETED, response);
}
@@ -226,15 +241,19 @@ public class DistributedApiAsyncTracker {
return new Pair<>(RequestStatusState.RUNNING, null);
}
- // The task has failed, but there are two options: if response is null, it has failed because the node on which it was
- // running has crashed. If it is not null, it has failed because the execution has failed. Because caller expects a non
- // null response in any case, let's make up one if needed...
+ // The task has failed, but there are two options: if response is null, it has failed because
+ // the node on which it was running has crashed. If it is not null, it has failed because the
+ // execution has failed. Because caller expects a non null response in any case, let's make up
+ // one if needed...
if (response == null) {
- // Node crash has removed the ephemeral node, but the command did not complete execution (or didn't even start it, who
- // knows). We have a failure to report though so let's create a reasonable return response.
+ // Node crash has removed the ephemeral node, but the command did not complete execution (or
+ // didn't even start it, who knows). We have a failure to report though so let's create a
+ // reasonable return response.
NamedList<Object> results = new NamedList<>();
SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();
- nl.add("msg", "Operation (asyncId: " + asyncId + ") failed due to server restart. Please resubmit.");
+ nl.add(
+ "msg",
+ "Operation (asyncId: " + asyncId + ") failed due to server restart. Please resubmit.");
nl.add("rspCode", SERVER_ERROR.code);
results.add("exception", nl);
response = new OverseerSolrResponse(results);
@@ -243,18 +262,21 @@ public class DistributedApiAsyncTracker {
return new Pair<>(RequestStatusState.FAILED, response);
}
-
/**
* Deletes a single async tracking ID if the corresponding job has completed or failed.
- * @return {@code true} if the {@code asyncId} was found to be of a completed or failed job and was successfully removed,
- * {@code false} if the id was not found or was found for a submitted or running job (these are not removed).
+ *
+ * @return {@code true} if the {@code asyncId} was found to be of a completed or failed job and
+ * was successfully removed, {@code false} if the id was not found or was found for a
+ * submitted or running job (these are not removed).
*/
public boolean deleteSingleAsyncId(String asyncId) throws Exception {
- return inFlightAsyncTasks.getInFlightState(asyncId) == InFlightJobs.State.NOT_FOUND && trackedAsyncTasks.remove(asyncId);
+ return inFlightAsyncTasks.getInFlightState(asyncId) == InFlightJobs.State.NOT_FOUND
+ && trackedAsyncTasks.remove(asyncId);
}
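A hedged sketch of reading a task's status and cleaning up finished ids with the methods above; both calls declare throws Exception, which the surrounding (omitted) request handling is assumed to deal with:

Pair<RequestStatusState, OverseerSolrResponse> status = tracker.getAsyncTaskRequestStatus(asyncId);
switch (status.first()) {
  case COMPLETED:
  case FAILED:
    // status.second() is guaranteed non-null here; the finished id can now be removed.
    tracker.deleteSingleAsyncId(asyncId);
    break;
  case SUBMITTED:
  case RUNNING:
    // Still in flight: report progress and poll again later.
    break;
  default:
    // NOT_FOUND: never tracked, or already cleaned up.
    break;
}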
/**
- * Deletes all async id's for completed or failed async jobs. Does not touch id's for submitted or running jobs.
+ * Deletes all async id's for completed or failed async jobs. Does not touch id's for submitted or
+ * running jobs.
*/
public void deleteAllAsyncIds() throws Exception {
Collection<String> allTracked = trackedAsyncTasks.keys();
@@ -265,7 +287,8 @@ public class DistributedApiAsyncTracker {
}
/**
- * Manages the ephemeral nodes for tasks currently being processed (running or waiting for a lock) by a node.
+ * Manages the ephemeral nodes for tasks currently being processed (running or waiting for a lock)
+ * by a node.
*/
private static class InFlightJobs {
enum State {
@@ -292,7 +315,8 @@ public class DistributedApiAsyncTracker {
zkClient.makePath(rootNodePath, new byte[0], CreateMode.PERSISTENT, true);
}
} catch (KeeperException.NodeExistsException nee) {
- // Some other thread (on this or another JVM) beat us to create the node, that's ok, the node exists.
+ // Some other thread (on this or another JVM) beat us to create the node, that's ok, the
+ // node exists.
} catch (KeeperException ke) {
throw new SolrException(SERVER_ERROR, "Error creating root node " + rootNodePath, ke);
} catch (InterruptedException ie) {
@@ -302,11 +326,16 @@ public class DistributedApiAsyncTracker {
}
void createNewInFlightTask(String asyncId) throws KeeperException, InterruptedException {
- zkClient.create(getPath(asyncId), State.SUBMITTED.shorthand.getBytes(StandardCharsets.UTF_8), CreateMode.EPHEMERAL, true);
+ zkClient.create(
+ getPath(asyncId),
+ State.SUBMITTED.shorthand.getBytes(StandardCharsets.UTF_8),
+ CreateMode.EPHEMERAL,
+ true);
}
void setTaskRunning(String asyncId) throws KeeperException, InterruptedException {
- zkClient.setData(getPath(asyncId), State.RUNNING.shorthand.getBytes(StandardCharsets.UTF_8), true);
+ zkClient.setData(
+ getPath(asyncId), State.RUNNING.shorthand.getBytes(StandardCharsets.UTF_8), true);
}
void deleteInFlightTask(String asyncId) throws KeeperException, InterruptedException {
@@ -324,14 +353,20 @@ public class DistributedApiAsyncTracker {
} catch (KeeperException.NoNodeException nne) {
// Unlikely race, but not impossible...
if (log.isInfoEnabled()) {
- log.info("AsyncId ephemeral node " + getPath(asyncId) + " vanished from underneath us. Funny."); // nowarn
+ log.info(
+ "AsyncId ephemeral node "
+ + getPath(asyncId)
+ + " vanished from underneath us. Funny."); // nowarn
}
return State.NOT_FOUND;
}
if (bytes == null) {
// This is not expected. The ephemeral nodes are always created with content.
- log.error("AsyncId ephemeral node " + getPath(asyncId) + " has null content. This is unexpected (bug)."); // nowarn
+ log.error(
+ "AsyncId ephemeral node "
+ + getPath(asyncId)
+ + " has null content. This is unexpected (bug)."); // nowarn
return State.NOT_FOUND;
}
@@ -341,7 +376,12 @@ public class DistributedApiAsyncTracker {
} else if (State.SUBMITTED.shorthand.equals(content)) {
return State.SUBMITTED;
} else {
- log.error("AsyncId ephemeral node " + getPath(asyncId) + " has unexpected content \"" + content + "\". This is unexpected (bug)."); // nowarn
+ log.error(
+ "AsyncId ephemeral node "
+ + getPath(asyncId)
+ + " has unexpected content \""
+ + content
+ + "\". This is unexpected (bug)."); // nowarn
return State.NOT_FOUND;
}
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedClusterStateUpdater.java b/solr/core/src/java/org/apache/solr/cloud/DistributedClusterStateUpdater.java
index 523fe6b..f76fb1f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedClusterStateUpdater.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedClusterStateUpdater.java
@@ -17,6 +17,21 @@
package org.apache.solr.cloud;
+import static java.util.Collections.singletonMap;
+import static org.apache.solr.cloud.overseer.ZkStateWriter.NO_OP;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTIONS_ZKNODE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION;
+
+import java.lang.invoke.MethodHandles;
+import java.util.*;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.cloud.api.collections.CollectionHandlingUtils;
import org.apache.solr.cloud.overseer.*;
@@ -32,107 +47,110 @@ import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.lang.invoke.MethodHandles;
-import java.util.*;
-
-import static java.util.Collections.singletonMap;
-import static org.apache.solr.cloud.overseer.ZkStateWriter.NO_OP;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTIONS_ZKNODE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION;
-
/**
- * Gives access to distributed cluster state update methods and allows code to inquire whether distributed state update is enabled.
+ * Gives access to distributed cluster state update methods and allows code to inquire whether
+ * distributed state update is enabled.
*/
public class DistributedClusterStateUpdater {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
/**
- * When {@code true} each node updates Zookeeper directly for changing state.json files. When {@code false} messages
- * are instead sent to the Overseer and the update is done there.
+ * When {@code true} each node updates Zookeeper directly for changing state.json files. When
+ * {@code false} messages are instead sent to the Overseer and the update is done there.
*/
private final boolean useDistributedStateUpdate;
/**
- * Builds an instance with the specified behavior regarding distribution of state updates, allowing to know distributed
- * updates are not enabled (parameter {@code useDistributedStateUpdate} is {@code false}), or when they are (parameter
- * is {@code true)}, gives access to methods and classes allowing the execution of the updates.
+ * Builds an instance with the specified behavior regarding distribution of state updates,
+ * allowing to know distributed updates are not enabled (parameter {@code
+ * useDistributedStateUpdate} is {@code false}), or when they are (parameter is {@code true)},
+ * gives access to methods and classes allowing the execution of the updates.
*
- * @param useDistributedStateUpdate when this parameter is {@code false}, only method expected to ever be called on this
- * instance is {@link #isDistributedStateUpdate}, and it will return {@code false}.
+ * @param useDistributedStateUpdate when this parameter is {@code false}, only method expected to
+ * ever be called on this instance is {@link #isDistributedStateUpdate}, and it will return
+ * {@code false}.
*/
public DistributedClusterStateUpdater(boolean useDistributedStateUpdate) {
this.useDistributedStateUpdate = useDistributedStateUpdate;
if (log.isInfoEnabled()) {
- log.info("Creating DistributedClusterStateUpdater with useDistributedStateUpdate=" + useDistributedStateUpdate
- + ". Solr will be using " + (useDistributedStateUpdate ? "distributed" : "Overseer based") + " cluster state updates."); // nowarn
+ log.info(
+ "Creating DistributedClusterStateUpdater with useDistributedStateUpdate="
+ + useDistributedStateUpdate
+ + ". Solr will be using "
+ + (useDistributedStateUpdate ? "distributed" : "Overseer based")
+ + " cluster state updates."); // nowarn
}
}
/**
- * Create a new instance of {@link StateChangeRecorder} for a given collection and a given intention (collection
- * creation vs. operations on an existing collection)
+ * Create a new instance of {@link StateChangeRecorder} for a given collection and a given
+ * intention (collection creation vs. operations on an existing collection)
*/
- public StateChangeRecorder createStateChangeRecorder(String collectionName, boolean isCollectionCreation) {
+ public StateChangeRecorder createStateChangeRecorder(
+ String collectionName, boolean isCollectionCreation) {
if (!useDistributedStateUpdate) {
- // Seeing this exception or any other of this kind here means there's a big bug in the code. No user input can cause this.
- throw new IllegalStateException("Not expecting to create instances of StateChangeRecorder when not using distributed state update");
+ // Seeing this exception or any other of this kind here means there's a big bug in the code.
+ // No user input can cause this.
+ throw new IllegalStateException(
+ "Not expecting to create instances of StateChangeRecorder when not using distributed state update");
}
return new StateChangeRecorder(collectionName, isCollectionCreation);
}
- /**
- * Syntactic sugar to allow a single change to the cluster state to be made in a single call.
- */
- public void doSingleStateUpdate(MutatingCommand command, ZkNodeProps message,
- SolrCloudManager scm, ZkStateReader zkStateReader) throws KeeperException, InterruptedException {
+ /** Syntactic sugar to allow a single change to the cluster state to be made in a single call. */
+ public void doSingleStateUpdate(
+ MutatingCommand command,
+ ZkNodeProps message,
+ SolrCloudManager scm,
+ ZkStateReader zkStateReader)
+ throws KeeperException, InterruptedException {
if (!useDistributedStateUpdate) {
- throw new IllegalStateException("Not expecting to execute doSingleStateUpdate when not using distributed state update");
+ throw new IllegalStateException(
+ "Not expecting to execute doSingleStateUpdate when not using distributed state update");
}
String collectionName = command.getCollectionName(message);
- final StateChangeRecorder scr = new StateChangeRecorder(collectionName, command.isCollectionCreation());
+ final StateChangeRecorder scr =
+ new StateChangeRecorder(collectionName, command.isCollectionCreation());
scr.record(command, message);
scr.executeStateUpdates(scm, zkStateReader);
}
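A hedged sketch of driving one state mutation through the method above; message, scm and zkStateReader are assumed to be in scope, and the chosen command is only an example:

DistributedClusterStateUpdater updater = new DistributedClusterStateUpdater(true);
if (updater.isDistributedStateUpdate()) {
  // Record and apply one ADDREPLICA-style change via a Compare-And-Swap write of state.json;
  // may throw KeeperException or InterruptedException.
  updater.doSingleStateUpdate(
      DistributedClusterStateUpdater.MutatingCommand.SliceAddReplica, message, scm, zkStateReader);
}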
public void executeNodeDownStateUpdate(String nodeName, ZkStateReader zkStateReader) {
if (!useDistributedStateUpdate) {
- throw new IllegalStateException("Not expecting to execute executeNodeDownStateUpdate when not using distributed state update");
+ throw new IllegalStateException(
+ "Not expecting to execute executeNodeDownStateUpdate when not using distributed state update");
}
CollectionNodeDownChangeCalculator.executeNodeDownStateUpdate(nodeName, zkStateReader);
}
/**
- * When this method returns {@code false} the legacy behavior of enqueueing cluster state update messages to Overseer
- * should be used and no other method of this class should be called.
+ * When this method returns {@code false} the legacy behavior of enqueueing cluster state update
+ * messages to Overseer should be used and no other method of this class should be called.
*/
public boolean isDistributedStateUpdate() {
return useDistributedStateUpdate;
}
/**
- * Naming of enum instances is the mutator object name (e.g. {@code Cluster} for {@link ClusterStateMutator} or
- * {@code Collection} for {@link CollectionMutator}) followed by the method name of the mutator.
- * For example {@link #SliceAddReplica} represents {@link SliceMutator#addReplica}.
- * <p>
- * Even though the various mutator classes do not implement any common interface, luckily their constructors and methods
- * take the same set of parameters so all can be called from the enum method {@link #buildWriteCommand(SolrCloudManager, ClusterState, ZkNodeProps)}.
- * <p>
- * Given that {@link OverseerAction#DOWNNODE} is different (it returns a list of write commands and impacts more than one collection),
- * it is handled specifically in {@link CollectionNodeDownChangeCalculator#executeNodeDownStateUpdate}.
+ * Naming of enum instances is the mutator object name (e.g. {@code Cluster} for {@link
+ * ClusterStateMutator} or {@code Collection} for {@link CollectionMutator}) followed by the
+ * method name of the mutator. For example {@link #SliceAddReplica} represents {@link
+ * SliceMutator#addReplica}.
+ *
+ * <p>Even though the various mutator classes do not implement any common interface, luckily their
+ * constructors and methods take the same set of parameters so all can be called from the enum
+ * method {@link #buildWriteCommand(SolrCloudManager, ClusterState, ZkNodeProps)}.
+ *
+ * <p>Given that {@link OverseerAction#DOWNNODE} is different (it returns a list of write commands
+ * and impacts more than one collection), it is handled specifically in {@link
+ * CollectionNodeDownChangeCalculator#executeNodeDownStateUpdate}.
*/
public enum MutatingCommand {
BalanceShardsUnique(BALANCESHARDUNIQUE, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
ExclusiveSliceProperty dProp = new ExclusiveSliceProperty(cs, message);
// Next line is where the actual work is done
if (dProp.balanceProperty()) {
@@ -144,7 +162,8 @@ public class DistributedClusterStateUpdater {
},
ClusterCreateCollection(CREATE, CommonParams.NAME) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new ClusterStateMutator(scm).createCollection(cs, message);
}
@@ -155,84 +174,98 @@ public class DistributedClusterStateUpdater {
},
ClusterDeleteCollection(DELETE, CommonParams.NAME) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new ClusterStateMutator(scm).deleteCollection(cs, message);
}
},
CollectionDeleteShard(DELETESHARD, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new CollectionMutator(scm).deleteShard(cs, message);
}
},
CollectionModifyCollection(MODIFYCOLLECTION, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new CollectionMutator(scm).modifyCollection(cs, message);
}
},
CollectionCreateShard(CREATESHARD, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new CollectionMutator(scm).createShard(cs, message);
}
},
ReplicaAddReplicaProperty(ADDREPLICAPROP, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new ReplicaMutator(scm).addReplicaProperty(cs, message);
}
},
ReplicaDeleteReplicaProperty(DELETEREPLICAPROP, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new ReplicaMutator(scm).deleteReplicaProperty(cs, message);
}
},
ReplicaSetState(null, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new ReplicaMutator(scm).setState(cs, message);
}
},
SliceAddReplica(ADDREPLICA, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new SliceMutator(scm).addReplica(cs, message);
}
},
SliceAddRoutingRule(null, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new SliceMutator(scm).addRoutingRule(cs, message);
}
},
SliceRemoveReplica(null, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new SliceMutator(scm).removeReplica(cs, message);
}
},
SliceRemoveRoutingRule(null, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new SliceMutator(scm).removeRoutingRule(cs, message);
}
},
SliceSetShardLeader(null, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new SliceMutator(scm).setShardLeader(cs, message);
}
},
SliceUpdateShardState(null, ZkStateReader.COLLECTION_PROP) {
@Override
- public ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
+ public ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message) {
return new SliceMutator(scm).updateShardState(cs, message);
}
};
- private static final EnumMap<CollectionParams.CollectionAction, MutatingCommand> actionsToCommands;
+ private static final EnumMap<CollectionParams.CollectionAction, MutatingCommand>
+ actionsToCommands;
static {
actionsToCommands = new EnumMap<>(CollectionParams.CollectionAction.class);
@@ -246,31 +279,35 @@ public class DistributedClusterStateUpdater {
private final CollectionParams.CollectionAction collectionAction;
private final String collectionNameParamName;
- MutatingCommand(CollectionParams.CollectionAction collectionAction, String collectionNameParamName) {
+ MutatingCommand(
+ CollectionParams.CollectionAction collectionAction, String collectionNameParamName) {
this.collectionAction = collectionAction;
this.collectionNameParamName = collectionNameParamName;
}
- /**
- * mutating commands that return a single ZkWriteCommand override this method
- */
- public abstract ZkWriteCommand buildWriteCommand(SolrCloudManager scm, ClusterState cs, ZkNodeProps message);
+ /** mutating commands that return a single ZkWriteCommand override this method */
+ public abstract ZkWriteCommand buildWriteCommand(
+ SolrCloudManager scm, ClusterState cs, ZkNodeProps message);
public String getCollectionName(ZkNodeProps message) {
return message.getStr(collectionNameParamName);
}
/**
- * @return the {@link MutatingCommand} corresponding to the passed {@link org.apache.solr.common.params.CollectionParams.CollectionAction} or
- * {@code null} if no cluster state update command is defined for that action (given that {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
- * are used for the Collection API and only some are used for the cluster state updates, this is expected).
+ * @return the {@link MutatingCommand} corresponding to the passed {@link
+ * org.apache.solr.common.params.CollectionParams.CollectionAction} or {@code null} if no
+ * cluster state update command is defined for that action (given that {@link
+ * org.apache.solr.common.params.CollectionParams.CollectionAction} are used for the
+ * Collection API and only some are used for the cluster state updates, this is expected).
*/
- public static MutatingCommand getCommandFor(CollectionParams.CollectionAction collectionAction) {
+ public static MutatingCommand getCommandFor(
+ CollectionParams.CollectionAction collectionAction) {
return actionsToCommands.get(collectionAction);
}
/**
- * Given only one command creates a collection {@link #ClusterCreateCollection}, the default implementation is provided here.
+ * Given only one command creates a collection {@link #ClusterCreateCollection}, the default
+ * implementation is provided here.
*/
public boolean isCollectionCreation() {
return false;
@@ -278,63 +315,72 @@ public class DistributedClusterStateUpdater {
}
/**
- * Instances of this class are the fundamental building block of the CAS (Compare and Swap) update approach. These instances
- * accept an initial cluster state (as present in Zookeeper basically) and apply to it a set of modifications that are
- * then attempted to be written back to Zookeeper {@link ZkUpdateApplicator is driving this process}.
- * If the update fails (due to a concurrent update), the Zookeeper content is read again, the changes (updates) are
- * applied to it again and a new write attempt is made. This guarantees than an update does not overwrite data just
+ * Instances of this class are the fundamental building block of the CAS (Compare and Swap) update
+ * approach. These instances accept an initial cluster state (as present in Zookeeper basically)
+ * and apply to it a set of modifications that are then attempted to be written back to Zookeeper
+ * {@link ZkUpdateApplicator is driving this process}. If the update fails (due to a concurrent
+ * update), the Zookeeper content is read again, the changes (updates) are applied to it again and
+ * a new write attempt is made. This guarantees that an update does not overwrite data just
* written by a concurrent update happening from the same or from another node.
*/
interface StateChangeCalculator {
String getCollectionName();
/**
- * @return {@code true} if this updater is computing updates for creating a collection that does not exist yet.
+ * @return {@code true} if this updater is computing updates for creating a collection that does
+ * not exist yet.
*/
boolean isCollectionCreation();
/**
- * Given an initial {@link ClusterState}, computes after applying updates the cluster state to be written to state.json
- * (made available through {@link #getUpdatedClusterState()}) as well as the list of per replica operations (made available
- * through {@link #getPerReplicaStatesOps()}). Any or both of these methods will return {@code null} if there is no
- * corresponding update to apply.
+ * Given an initial {@link ClusterState}, applies the updates and computes the cluster state to
+ * be written to state.json (made available through {@link #getUpdatedClusterState()}) as well
+ * as the list of per replica operations (made available through {@link
+ * #getPerReplicaStatesOps()}). Either or both of these methods will return {@code null} if there
+ * is no corresponding update to apply.
*/
void computeUpdates(ClusterState currentState, SolrZkClient client);
/**
* Method can only be called after {@link #computeUpdates} has been called.
+ *
* @return the new state to write into {@code state.json} or {@code null} if no update needed.
*/
ClusterState getUpdatedClusterState();
/**
* Method can only be called after {@link #computeUpdates} has been called.
+ *
* @return {@code null} when there are no per replica state ops
*/
List<PerReplicaStatesOps> getPerReplicaStatesOps();
}
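The interface above is what drives the compare-and-swap loop: read the current state and its version, let the calculator compute the updated state, attempt a conditional write, and start over on a version conflict. A simplified illustration of that loop is sketched below; the helper names readCurrentState and writeConditionally are hypothetical, per replica state handling is omitted, and the real implementation lives in ZkUpdateApplicator.applyUpdate():

    // Simplified CAS loop over state.json.
    for (int attempt = 0; attempt < CAS_MAX_ATTEMPTS; attempt++) {
      ClusterState initialState = readCurrentState();    // hypothetical: reads state.json + its znode version
      calculator.computeUpdates(initialState, zkClient);
      ClusterState updatedState = calculator.getUpdatedClusterState();
      if (updatedState == null) {
        return;                                           // nothing to write to state.json
      }
      try {
        writeConditionally(updatedState);                 // hypothetical: setData with the expected version
        return;                                           // success
      } catch (KeeperException.BadVersionException e) {
        // A concurrent update won the race: re-read and retry.
      }
    }
    throw new KeeperException.BadVersionException(collectionStatePath); // give up after CAS_MAX_ATTEMPTS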
/**
- * This class is passed a {@link StateChangeCalculator} targeting a single collection that is able to apply an update to an
- * initial cluster state and return the updated cluster state. The {@link StateChangeCalculator} is used (possibly multiple times)
- * to do a Compare And Swap (a.k.a conditional update or CAS) of the collection's {@code state.json} Zookeeper file.<p>
+ * This class is passed a {@link StateChangeCalculator} targeting a single collection that is able
+ * to apply an update to an initial cluster state and return the updated cluster state. The {@link
+ * StateChangeCalculator} is used (possibly multiple times) to do a Compare And Swap (a.k.a
+ * conditional update or CAS) of the collection's {@code state.json} Zookeeper file.
*
- * When there are per replica states to update, they are attempted once (they do their own Compare And Swap), before
- * the (potentially multiple) attempts to update the {@code state.json} file. This conforms to the strategy in place
- * when {@code state.json} updates are sent to the Overseer to do. See {@link ZkStateWriter#writePendingUpdates}.
+ * <p>When there are per replica states to update, they are attempted once (they do their own
+ * Compare And Swap), before the (potentially multiple) attempts to update the {@code state.json}
+ * file. This conforms to the strategy in place when {@code state.json} updates are sent to the
+ * Overseer to do. See {@link ZkStateWriter#writePendingUpdates}.
*/
- static private class ZkUpdateApplicator {
+ private static class ZkUpdateApplicator {
/**
- * When trying to update a {@code state.json} file that keeps getting changed by concurrent updater, the number of attempts
- * made before giving up. This is likely way too high, if we get to 50 failed attempts something else went wrong.
- * To be reconsidered once Collection API commands are distributed as well.
+ * When trying to update a {@code state.json} file that keeps getting changed by concurrent
+ * updaters, the number of attempts made before giving up. This is likely way too high; if we get
+ * to 50 failed attempts, something else went wrong. To be reconsidered once Collection API
+ * commands are distributed as well.
*/
public static final int CAS_MAX_ATTEMPTS = 50;
private final ZkStateReader zkStateReader;
private final StateChangeCalculator updater;
- static void applyUpdate(ZkStateReader zkStateReader, StateChangeCalculator updater) throws KeeperException, InterruptedException {
+ static void applyUpdate(ZkStateReader zkStateReader, StateChangeCalculator updater)
+ throws KeeperException, InterruptedException {
ZkUpdateApplicator zua = new ZkUpdateApplicator(zkStateReader, updater);
zua.applyUpdate();
}
@@ -345,9 +391,9 @@ public class DistributedClusterStateUpdater {
}
/**
- * By delegating work to {@link PerReplicaStatesOps} for per replica state updates, and using optimistic locking
- * (with retries) to directly update the content of {@code state.json}, updates Zookeeper with the changes computed
- * by the {@link StateChangeCalculator}.
+ * By delegating work to {@link PerReplicaStatesOps} for per replica state updates, and using
+ * optimistic locking (with retries) to directly update the content of {@code state.json},
+ * updates Zookeeper with the changes computed by the {@link StateChangeCalculator}.
*/
private void applyUpdate() throws KeeperException, InterruptedException {
/* Initial slightly naive implementation (later on we should consider some caching between updates...).
@@ -365,27 +411,32 @@ public class DistributedClusterStateUpdater {
// Note we DO NOT track nor use the live nodes in the cluster state.
// That may mean the two abstractions (collection metadata vs. nodes) should be separated.
- // For now trying to diverge as little as possible from existing data structures and code given the need to
- // support both the old way (Overseer) and new way (distributed) of handling cluster state update.
+ // For now trying to diverge as little as possible from existing data structures and code
+ // given the need to support both the old way (Overseer) and new way (distributed) of handling
+ // cluster state update.
final Set<String> liveNodes = Collections.emptySet();
- // Per Replica States updates are done before all other updates and not subject to the number of attempts of CAS
- // made here, given they have their own CAS strategy and implementation (see PerReplicaStatesOps.persist()).
+ // Per Replica States updates are done before all other updates and not subject to the number
+ // of attempts of CAS made here, given they have their own CAS strategy and implementation
+ // (see PerReplicaStatesOps.persist()).
boolean firstAttempt = true;
- // When there are multiple retries of state.json write and the cluster state gets updated over and over again with
- // the changes done in the per replica states, we avoid refetching those multiple times.
+ // When there are multiple retries of state.json write and the cluster state gets updated over
+ // and over again with the changes done in the per replica states, we avoid refetching those
+ // multiple times.
PerReplicaStates fetchedPerReplicaStates = null;
- // Later on (when Collection API commands are distributed) we will have to rely on the version of state.json
- // to implement the replacement of Collection API locking. Then we should not blindly retry cluster state updates
- // as we do here but instead intelligently fail (or retry completely) the Collection API call when seeing that
- // state.json was changed by a concurrent command execution.
- // The loop below is ok for distributing cluster state updates from Overseer to all nodes while Collection API
- // commands are still executed on the Overseer and manage their locking the old fashioned way.
+ // Later on (when Collection API commands are distributed) we will have to rely on the version
+ // of state.json to implement the replacement of Collection API locking. Then we should not
+ // blindly retry cluster state updates as we do here but instead intelligently fail (or retry
+ // completely) the Collection API call when seeing that state.json was changed by a concurrent
+ // command execution. The loop below is ok for distributing cluster state updates from
+ // Overseer to all nodes while Collection API commands are still executed on the Overseer and
+ // manage their locking the old fashioned way.
for (int attempt = 0; attempt < CAS_MAX_ATTEMPTS; attempt++) {
// Start by reading the current state.json (if this is an update).
- // TODO Eventually rethink the way each node manages and caches its copy of the cluster state. Knowing about all collections in the cluster might not be needed.
+ // TODO Eventually rethink the way each node manages and caches its copy of the cluster
+ // state. Knowing about all collections in the cluster might not be needed.
ClusterState initialClusterState;
if (updater.isCollectionCreation()) {
initialClusterState = new ClusterState(liveNodes, Collections.emptyMap());
@@ -394,10 +445,11 @@ public class DistributedClusterStateUpdater {
initialClusterState = fetchStateForCollection();
}
- // Apply the desired changes. Note that the cluster state passed to the chain of mutators is totally up to date
- // (it's read from ZK just above). So assumptions made in the mutators (like SliceMutator.removeReplica() deleting
- // the whole collection if it's not found) are ok. Actually in the removeReplica case, the collection will always
- // exist otherwise the call to fetchStateForCollection() above would have failed.
+ // Apply the desired changes. Note that the cluster state passed to the chain of mutators is
+ // totally up to date (it's read from ZK just above). So assumptions made in the mutators
+ // (like SliceMutator.removeReplica() deleting the whole collection if it's not found) are
+ // ok. Actually in the removeReplica case, the collection will always exist otherwise the
+ // call to fetchStateForCollection() above would have failed.
updater.computeUpdates(initialClusterState, zkStateReader.getZkClient());
ClusterState updatedState = updater.getUpdatedClusterState();
@@ -421,17 +473,25 @@ public class DistributedClusterStateUpdater {
}
// Get the latest version of the collection from the cluster state first.
- // There is no notion of "cached" here (the boolean passed below) as we the updatedState is based on CollectionRef
- DocCollection docCollection = updatedState.getCollectionOrNull(updater.getCollectionName(), true);
-
- // If we did update per replica states and we're also updating state.json, update the content of state.json to reflect
- // the changes made to replica states. Not strictly necessary (the state source of truth is in per replica states), but nice to have...
+ // There is no notion of "cached" here (the boolean passed below) as the updatedState is
+ // based on CollectionRef
+ DocCollection docCollection =
+ updatedState.getCollectionOrNull(updater.getCollectionName(), true);
+
+ // If we did update per replica states and we're also updating state.json, update the
+ // content of state.json to reflect the changes made to replica states. Not strictly
+ // necessary (the state source of truth is in per replica states), but nice to have...
if (allStatesOps != null) {
if (docCollection != null) {
- // Fetch the per replica states updates done previously or skip fetching if we already have them
- fetchedPerReplicaStates = PerReplicaStates.fetch(docCollection.getZNode(), zkStateReader.getZkClient(), fetchedPerReplicaStates);
+ // Fetch the per replica states updates done previously or skip fetching if we already
+ // have them
+ fetchedPerReplicaStates =
+ PerReplicaStates.fetch(
+ docCollection.getZNode(), zkStateReader.getZkClient(), fetchedPerReplicaStates);
// Transpose the per replica states into the cluster state
- updatedState = updatedState.copyWith(updater.getCollectionName(), docCollection.copyWith(fetchedPerReplicaStates));
+ updatedState =
+ updatedState.copyWith(
+ updater.getCollectionName(), docCollection.copyWith(fetchedPerReplicaStates));
}
}
@@ -441,59 +501,73 @@ public class DistributedClusterStateUpdater {
return; // state.json updated successfully.
} catch (KeeperException.BadVersionException bve) {
if (updater.isCollectionCreation()) {
- // Not expecting to see this exception when creating new state.json fails, so throwing it up the food chain.
+ // Not expecting to see this exception when creating new state.json fails, so throwing
+ // it up the food chain.
throw bve;
}
}
- // We've tried to update an existing state.json and got a BadVersionException. We'll try again a few times.
- // When only two threads compete, no point in waiting: if we lost this time we'll get it next time right away.
- // But if more threads compete, then waiting a bit (random delay) can improve our chances. The delay should in
- // theory grow as the number of concurrent threads attempting updates increase, but we don't know that number, so
- // doing exponential backoff instead.
- // With "per replica states" collections, concurrent attempts of even just two threads are expected to be extremely rare.
- Thread.sleep(CollectionHandlingUtils.RANDOM.nextInt(attempt < 13 ? 1 << attempt : 1 << 13)); // max wait 2^13ms=8.192 sec
- }
-
- // We made quite a few attempts but failed repeatedly. This is pretty bad but we can't loop trying forever.
- // Offering a job to the Overseer wouldn't usually fail if the ZK queue can be written to (but the Overseer can then
- // loop forever attempting the update).
- // We do want whoever called us to fail right away rather than to wait for a cluster change and timeout because it
- // didn't happen. Likely need to review call by call what is the appropriate behaviour, especially once Collection
- // API is distributed (because then the Collection API call will fail if the underlying cluster state update cannot
- // be done, and that's a desirable thing).
- throw new KeeperException.BadVersionException(ZkStateReader.getCollectionPath(updater.getCollectionName()));
+ // We've tried to update an existing state.json and got a BadVersionException. We'll try
+ // again a few times. When only two threads compete, no point in waiting: if we lost this
+ // time we'll get it next time right away. But if more threads compete, then waiting a bit
+ // (random delay) can improve our chances. The delay should in theory grow as the number of
+ // concurrent threads attempting updates increase, but we don't know that number, so doing
+ // exponential backoff instead. With "per replica states" collections, concurrent attempts
+ // of even just two threads are expected to be extremely rare.
+ Thread.sleep(
+ CollectionHandlingUtils.RANDOM.nextInt(
+ attempt < 13 ? 1 << attempt : 1 << 13)); // max wait 2^13ms=8.192 sec
+ }
+
+ // We made quite a few attempts but failed repeatedly. This is pretty bad but we can't loop
+ // trying forever. Offering a job to the Overseer wouldn't usually fail if the ZK queue can be
+ // written to (but the Overseer can then loop forever attempting the update). We do want
+ // whoever called us to fail right away rather than to wait for a cluster change and timeout
+ // because it didn't happen. Likely need to review call by call what is the appropriate
+ // behaviour, especially once Collection API is distributed (because then the Collection API
+ // call will fail if the underlying cluster state update cannot be done, and that's a
+ // desirable thing).
+ throw new KeeperException.BadVersionException(
+ ZkStateReader.getCollectionPath(updater.getCollectionName()));
}
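The Thread.sleep() near the end of the retry loop above implements randomized exponential backoff. A small stand-alone illustration of the per-attempt wait it computes, with java.util.Random standing in for CollectionHandlingUtils.RANDOM:

    // Upper bound doubles with each failed attempt and is capped at 2^13 ms = 8.192 s;
    // the actual wait is a uniformly random value below that bound, spreading out competing writers.
    static long backoffMillis(int attempt, java.util.Random random) {
      int bound = attempt < 13 ? 1 << attempt : 1 << 13;
      return random.nextInt(bound);
    }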
/**
- * After the computing of the new {@link ClusterState} containing all needed updates to the collection based on what the
- * {@link StateChangeCalculator} computed, this method does an update in ZK to the collection's {@code state.json}. It is the
- * equivalent of Overseer's {@link ZkStateWriter#writePendingUpdates} (in its actions related to {@code state.json}
- * as opposed to the per replica states).
- * <p>
- * Note that in a similar way to what happens in {@link ZkStateWriter#writePendingUpdates}, collection delete is handled
- * as a special case. (see comment on {@link DistributedClusterStateUpdater.StateChangeRecorder.RecordedMutationsPlayer}
- * on why the code has to be duplicated)<p>
+ * After computing the new {@link ClusterState} containing all needed updates to the
+ * collection based on what the {@link StateChangeCalculator} computed, this method does an
+ * update in ZK to the collection's {@code state.json}. It is the equivalent of Overseer's
+ * {@link ZkStateWriter#writePendingUpdates} (in its actions related to {@code state.json} as
+ * opposed to the per replica states).
+ *
+ * <p>Note that in a similar way to what happens in {@link ZkStateWriter#writePendingUpdates},
+ * collection delete is handled as a special case. (see comment on {@link
+ * DistributedClusterStateUpdater.StateChangeRecorder.RecordedMutationsPlayer} on why the code
+ * has to be duplicated)
*
- * <b>Note for the future:</b> Given this method is where the actually write to ZK is done, that's the place where we
- * can rebuild a DocCollection with updated zk version. Eventually if we maintain a cache of recently used collections,
- * we want to capture the updated collection and put it in the cache to avoid reading it again (unless it changed,
- * the CAS will fail and we will refresh).<p>
+ * <p><b>Note for the future:</b> Given this method is where the actual write to ZK is done,
+ * that's the place where we can rebuild a DocCollection with updated zk version. Eventually if
+ * we maintain a cache of recently used collections, we want to capture the updated collection
+ * and put it in the cache to avoid reading it again (unless it changed, the CAS will fail and
+ * we will refresh).
*
- * This could serve as the basis for a strategy where each node does not need any view of all collections in the cluster
- * but only a cache of recently used collections (possibly not even needing watches on them, but we'll discuss this later).
+ * <p>This could serve as the basis for a strategy where each node does not need any view of all
+ * collections in the cluster but only a cache of recently used collections (possibly not even
+ * needing watches on them, but we'll discuss this later).
*/
- private void doStateDotJsonCasUpdate(ClusterState updatedState) throws KeeperException, InterruptedException {
+ private void doStateDotJsonCasUpdate(ClusterState updatedState)
+ throws KeeperException, InterruptedException {
String jsonPath = ZkStateReader.getCollectionPath(updater.getCollectionName());
// Collection delete
if (!updatedState.hasCollection(updater.getCollectionName())) {
- // We do not have a collection znode version to test we delete the right version of state.json. But this doesn't really matter:
- // if we had one, and the delete failed (because state.json got updated in the meantime), we would re-read the collection
- // state, update our version, run the CAS delete again and it will pass. Which means that one way or another, deletes are final.
- // I hope nobody deletes a collection then creates a new one with the same name immediately (although the creation should fail
- // if the znode still exists, so the creation would only succeed after the delete made it, and we're ok).
- // With Overseer based updates the same behavior can be observed: a collection update is enqueued followed by the
- // collection delete before the update was executed.
+ // We do not have a collection znode version to test we delete the right version of
+ // state.json. But this doesn't really matter:
+ // if we had one, and the delete failed (because state.json got updated in the meantime), we
+ // would re-read the collection state, update our version, run the CAS delete again and it
+ // will pass. Which means that one way or another, deletes are final. I hope nobody deletes
+ // a collection then creates a new one with the same name immediately (although the creation
+ // should fail if the znode still exists, so the creation would only succeed after the
+ // delete made it, and we're ok).
+ // With Overseer based updates the same behavior can be observed: a collection update is
+ // enqueued followed by the collection delete before the update was executed.
log.debug("going to recursively delete state.json at {}", jsonPath);
zkStateReader.getZkClient().clean(jsonPath);
} else {
@@ -508,66 +582,85 @@ public class DistributedClusterStateUpdater {
} else {
// We're updating an existing state.json
if (log.isDebugEnabled()) {
- log.debug("going to update collection {} version: {}", jsonPath, collection.getZNodeVersion());
+ log.debug(
+ "going to update collection {} version: {}",
+ jsonPath,
+ collection.getZNodeVersion());
}
- zkStateReader.getZkClient().setData(jsonPath, stateJson, collection.getZNodeVersion(), true);
+ zkStateReader
+ .getZkClient()
+ .setData(jsonPath, stateJson, collection.getZNodeVersion(), true);
}
}
}
/**
- * Creates a {@link ClusterState} with the state of an existing single collection, with no live nodes information.
- * Eventually this state should be reused across calls if it is fresh enough... (we have to deal anyway with failures
- * of conditional updates so trying to use non fresh data is ok, a second attempt will be made)
+ * Creates a {@link ClusterState} with the state of an existing single collection, with no live
+ * nodes information. Eventually this state should be reused across calls if it is fresh
+ * enough... (we have to deal anyway with failures of conditional updates so trying to use
+ * non-fresh data is ok, a second attempt will be made)
*/
private ClusterState fetchStateForCollection() throws KeeperException, InterruptedException {
String collectionStatePath = ZkStateReader.getCollectionPath(updater.getCollectionName());
Stat stat = new Stat();
byte[] data = zkStateReader.getZkClient().getData(collectionStatePath, null, stat, true);
- // This factory method can detect a missing configName and supply it by reading it from the old ZK location.
+ // This factory method can detect a missing configName and supply it by reading it from the
+ // old ZK location.
// TODO in Solr 10 remove that factory method
- ClusterState clusterState = ClusterState.createFromJsonSupportingLegacyConfigName(
- stat.getVersion(), data, Collections.emptySet(), updater.getCollectionName(), zkStateReader.getZkClient());
+ ClusterState clusterState =
+ ClusterState.createFromJsonSupportingLegacyConfigName(
+ stat.getVersion(),
+ data,
+ Collections.emptySet(),
+ updater.getCollectionName(),
+ zkStateReader.getZkClient());
return clusterState;
}
}
/**
- * Class handling the distributed updates of collection's Zookeeper files {@code state.json} based on multiple updates
- * applied to a single collection (as is sometimes done by *Cmd classes implementing the Collection API commands).<p>
- * Previously these updates were sent one by one to Overseer and then grouped by org.apache.solr.cloud.Overseer.ClusterStateUpdater.
- * <p>
- * Records desired changes to {@code state.json} files in Zookeeper (as are done by the family of mutator classes such as
- * {@link org.apache.solr.cloud.overseer.ClusterStateMutator}, {@link org.apache.solr.cloud.overseer.CollectionMutator}
- * etc.) in order to be able to later execute them on the actual content of the {@code state.json} files using optimistic
- * locking (and retry a few times if the optimistic locking failed).
- * <p>
- * Instances are <b>not</b> thread safe.
+ * Class handling the distributed updates of a collection's Zookeeper file {@code state.json} based
+ * on multiple updates applied to a single collection (as is sometimes done by *Cmd classes
+ * implementing the Collection API commands).
+ *
+ * <p>Previously these updates were sent one by one to Overseer and then grouped by
+ * org.apache.solr.cloud.Overseer.ClusterStateUpdater.
+ *
+ * <p>Records desired changes to {@code state.json} files in Zookeeper (as are done by the family
+ * of mutator classes such as {@link org.apache.solr.cloud.overseer.ClusterStateMutator}, {@link
+ * org.apache.solr.cloud.overseer.CollectionMutator} etc.) in order to be able to later execute
+ * them on the actual content of the {@code state.json} files using optimistic locking (and retry
+ * a few times if the optimistic locking failed).
+ *
+ * <p>Instances are <b>not</b> thread safe.
*/
public static class StateChangeRecorder {
final List<Pair<MutatingCommand, ZkNodeProps>> mutations;
- /**
- * The collection name for which are all recorded commands
- */
+ /** The collection name to which all recorded commands apply */
final String collectionName;
/**
- * {@code true} if recorded commands assume creation of the collection {@code state.json} file.<br>
- * {@code false} if an existing {@code state.json} is to be updated.<p>
+ * {@code true} if recorded commands assume creation of the collection {@code state.json} file.
+ * <br>
+ * {@code false} if an existing {@code state.json} is to be updated.
- * This variable is used for defensive programming and catching issues. It might be removed once we're done removing and testing
- * the distribution of the cluster state update updates.
+ *
+ * <p>This variable is used for defensive programming and catching issues. It might be removed
+ * once we're done removing and testing the distribution of the cluster state updates.
*/
final boolean isCollectionCreation;
/**
- * For collection creation recording, there should be only one actual creation (and it should be the first recorded command
+ * For collection creation recording, there should be only one actual creation (and it should be
+ * the first recorded command).
*/
boolean creationCommandRecorded = false;
private StateChangeRecorder(String collectionName, boolean isCollectionCreation) {
if (collectionName == null) {
- final String err = "Internal bug. collectionName=null (isCollectionCreation=" + isCollectionCreation + ")";
+ final String err =
+ "Internal bug. collectionName=null (isCollectionCreation=" + isCollectionCreation + ")";
log.error(err);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, err);
}
@@ -577,23 +670,29 @@ public class DistributedClusterStateUpdater {
}
/**
- * Records a mutation method and its parameters so that it can be executed later to modify the corresponding Zookeeper state.
- * Note the message is identical to the one used for communicating with Overseer (at least initially) so it also contains
- * the action in parameter {@link org.apache.solr.cloud.Overseer#QUEUE_OPERATION}, but that value is ignored here
- * in favor of the value passed in {@code command}.
+ * Records a mutation method and its parameters so that it can be executed later to modify the
+ * corresponding Zookeeper state. Note the message is identical to the one used for
+ * communicating with Overseer (at least initially) so it also contains the action in parameter
+ * {@link org.apache.solr.cloud.Overseer#QUEUE_OPERATION}, but that value is ignored here in
+ * favor of the value passed in {@code command}.
*
- * @param message the parameters associated with the command that are kept in the recorded mutations to be played
- * later. Note that this call usually replaces a call to {@link org.apache.solr.cloud.Overseer#offerStateUpdate(byte[])}
- * that is passed a <b>copy</b> of the data!<br>
- * This means that if {@code message} passed in here is reused before the recorded commands are replayed,
- * things will break! Need to make sure all places calling this method do not reuse the data passed in
- * (otherwise need to make a copy).
+ * @param message the parameters associated with the command that are kept in the recorded
+ * mutations to be played later. Note that this call usually replaces a call to {@link
+ * org.apache.solr.cloud.Overseer#offerStateUpdate(byte[])} that is passed a <b>copy</b> of
+ * the data!<br>
+ * This means that if {@code message} passed in here is reused before the recorded commands
+ * are replayed, things will break! Need to make sure all places calling this method do not
+ * reuse the data passed in (otherwise need to make a copy).
*/
public void record(MutatingCommand command, ZkNodeProps message) {
if (isCollectionCreation && !creationCommandRecorded) {
// First received command should be collection creation
if (!command.isCollectionCreation()) {
- final String err = "Internal bug. Creation of collection " + collectionName + " unexpected command " + command.name();
+ final String err =
+ "Internal bug. Creation of collection "
+ + collectionName
+ + " unexpected command "
+ + command.name();
log.error(err);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, err);
}
@@ -601,8 +700,16 @@ public class DistributedClusterStateUpdater {
} else {
// If collection creation already received or not expected, should not get (another) one
if (command.isCollectionCreation()) {
- final String err = "Internal bug. Creation of collection " + collectionName + " unexpected command " +
- command.name() + " (isCollectionCreation=" + isCollectionCreation + ", creationCommandRecorded=" + creationCommandRecorded + ")";
+ final String err =
+ "Internal bug. Creation of collection "
+ + collectionName
+ + " unexpected command "
+ + command.name()
+ + " (isCollectionCreation="
+ + isCollectionCreation
+ + ", creationCommandRecorded="
+ + creationCommandRecorded
+ + ")";
log.error(err);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, err);
}
@@ -610,8 +717,13 @@ public class DistributedClusterStateUpdater {
if (!collectionName.equals(command.getCollectionName(message))) {
// All recorded commands must be for same collection
- final String err = "Internal bug. State change for collection " + collectionName +
- " received command " + command + " for collection " + command.getCollectionName(message);
+ final String err =
+ "Internal bug. State change for collection "
+ + collectionName
+ + " received command "
+ + command
+ + " for collection "
+ + command.getCollectionName(message);
log.error(err);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, err);
}
@@ -620,20 +732,24 @@ public class DistributedClusterStateUpdater {
}
/**
- * This class allows taking the initial (passed in) cluster state, applying to it cluster mutations and returning the resulting
- * cluster state.
- * <p>
- * It is used to be able to try to apply multiple times a set of changes to cluster state when the Compare And Swap (conditional
- * update) fails due to concurrent modification.
- * <p>
- * For each mutation, a {@link ZkWriteCommand} is first created (capturing how the mutation impacts the cluster state), this is
- * the equivalent of what the Overseer is doing in ClusterStateUpdater.processMessage().<p>
+ * This class allows taking the initial (passed in) cluster state, applying to it cluster
+ * mutations and returning the resulting cluster state.
+ *
+ * <p>It is used to be able to try to apply multiple times a set of changes to cluster state
+ * when the Compare And Swap (conditional update) fails due to concurrent modification.
+ *
+ * <p>For each mutation, a {@link ZkWriteCommand} is first created (capturing how the mutation
+ * impacts the cluster state), this is the equivalent of what the Overseer is doing in
+ * ClusterStateUpdater.processMessage().
+ *
* <p>
- * Then, a new {@link ClusterState} is built by replacing the existing collection by its new value as computed in the
- * {@link ZkWriteCommand}. This is done by Overseer in {@link ZkStateWriter#enqueueUpdate} (and {@link ZkStateWriter} is hard
- * tu reuse because although it contains the logic for doing the update that would be needed here, it is coupled with the
- * actual instance of {@link ClusterState} being maintained, the stream of updates to be applied to it and applying
- * the per replica state changes).
+ *
+ * <p>Then, a new {@link ClusterState} is built by replacing the existing collection by its new
+ * value as computed in the {@link ZkWriteCommand}. This is done by Overseer in {@link
+ * ZkStateWriter#enqueueUpdate} (and {@link ZkStateWriter} is hard to reuse because although it
+ * contains the logic for doing the update that would be needed here, it is coupled with the
+ * actual instance of {@link ClusterState} being maintained, the stream of updates to be applied
+ * to it and applying the per replica state changes).
*/
private static class RecordedMutationsPlayer implements StateChangeCalculator {
private final SolrCloudManager scm;
@@ -647,7 +763,11 @@ public class DistributedClusterStateUpdater {
// null means no updates needed to the per replica state znodes. Set in computeUpdates()
private List<PerReplicaStatesOps> replicaOpsList = null;
- RecordedMutationsPlayer(SolrCloudManager scm, String collectionName, boolean isCollectionCreation, List<Pair<MutatingCommand, ZkNodeProps>> mutations) {
+ RecordedMutationsPlayer(
+ SolrCloudManager scm,
+ String collectionName,
+ boolean isCollectionCreation,
+ List<Pair<MutatingCommand, ZkNodeProps>> mutations) {
this.scm = scm;
this.collectionName = collectionName;
this.isCollectionCreation = isCollectionCreation;
@@ -681,11 +801,16 @@ public class DistributedClusterStateUpdater {
perReplicaStateOps.add(zkcmd.ops);
}
} catch (Exception e) {
- // Seems weird to skip rather than fail, but that's what Overseer is doing (see ClusterStateUpdater.processQueueItem()).
- // Maybe in the new distributed update world we should make the caller fail? (something Overseer cluster state updater can't do)
- // To be reconsidered once Collection API commands are distributed because then cluster updates are done synchronously and
- // have the opportunity to make the Collection API call fail directly.
- log.error("Distributed cluster state update could not process the current clusterstate state update message, skipping the message: {}", message, e);
+ // Seems weird to skip rather than fail, but that's what Overseer is doing (see
+ // ClusterStateUpdater.processQueueItem()). Maybe in the new distributed update world we
+ // should make the caller fail? (something Overseer cluster state updater can't do) To
+ // be reconsidered once Collection API commands are distributed because then cluster
+ // updates are done synchronously and have the opportunity to make the Collection API
+ // call fail directly.
+ log.error(
+ "Distributed cluster state update could not process the current clusterstate state update message, skipping the message: {}",
+ message,
+ e);
}
}
@@ -705,36 +830,51 @@ public class DistributedClusterStateUpdater {
}
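The RecordedMutationsPlayer javadoc above describes how each recorded mutation is turned into a ZkWriteCommand and folded into the cluster state. A condensed sketch of that fold, using the copyWith() pattern visible elsewhere in this commit (variable names are illustrative and per replica state ops are omitted):

    // Replay the recorded (command, message) pairs on top of the state read from ZooKeeper.
    ClusterState state = initialState;
    for (Pair<MutatingCommand, ZkNodeProps> mutation : mutations) {
      ZkWriteCommand zkcmd = mutation.first().buildWriteCommand(scm, state, mutation.second());
      if (zkcmd != ZkStateWriter.NO_OP) {
        // Replace (or add) the collection with the value the mutator computed.
        state = state.copyWith(zkcmd.name, zkcmd.collection);
      }
    }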
/**
- * Using optimistic locking (and retries when needed) updates Zookeeper with the changes previously recorded by calls
- * to {@link #record(MutatingCommand, ZkNodeProps)}.
+ * Using optimistic locking (and retries when needed) updates Zookeeper with the changes
+ * previously recorded by calls to {@link #record(MutatingCommand, ZkNodeProps)}.
*/
- public void executeStateUpdates(SolrCloudManager scm, ZkStateReader zkStateReader) throws KeeperException, InterruptedException {
+ public void executeStateUpdates(SolrCloudManager scm, ZkStateReader zkStateReader)
+ throws KeeperException, InterruptedException {
if (log.isDebugEnabled()) {
- log.debug("Executing updates for collection " + collectionName + ", is creation=" + isCollectionCreation + ", " + mutations.size() + " recorded mutations.", new Exception("StackTraceOnly")); // nowarn
+ log.debug(
+ "Executing updates for collection "
+ + collectionName
+ + ", is creation="
+ + isCollectionCreation
+ + ", "
+ + mutations.size()
+ + " recorded mutations.",
+ new Exception("StackTraceOnly")); // nowarn
}
if (mutations.isEmpty()) {
- final String err = "Internal bug. Unexpected empty set of mutations to apply for collection " + collectionName;
+ final String err =
+ "Internal bug. Unexpected empty set of mutations to apply for collection "
+ + collectionName;
log.error(err);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, err);
}
- RecordedMutationsPlayer mutationPlayer = new RecordedMutationsPlayer(scm, collectionName, isCollectionCreation, mutations);
+ RecordedMutationsPlayer mutationPlayer =
+ new RecordedMutationsPlayer(scm, collectionName, isCollectionCreation, mutations);
ZkUpdateApplicator.applyUpdate(zkStateReader, mutationPlayer);
// TODO update stats here for the various commands executed successfully or not?
- // This would replace the stats about cluster state updates that the Collection API currently makes available using
- // the OVERSEERSTATUS command, but obviously would be per node and will not have stats about queues (since there
- // will be no queues). Would be useful in some tests though, for example TestSkipOverseerOperations.
- // Probably better to rethink what types of stats are expected from a distributed system rather than trying to present
- // those previously provided by a central server in the system (the Overseer).
+ // This would replace the stats about cluster state updates that the Collection API currently
+ // makes available using the OVERSEERSTATUS command, but obviously would be per node and will
+ // not have stats about queues (since there will be no queues). Would be useful in some tests
+ // though, for example TestSkipOverseerOperations. Probably better to rethink what types of
+ // stats are expected from a distributed system rather than trying to present those previously
+ // provided by a central server in the system (the Overseer).
}
}
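Tying the recorder and the applicator together, an illustrative call sequence from a command that updates a shard would look roughly as follows; how the recorder instance is obtained is not shown in this diff, so it is assumed to already exist, and the two messages are placeholders:

    // Record the desired mutations first, then replay them against state.json with CAS retries.
    recorder.record(MutatingCommand.SliceSetShardLeader, leaderMessage);
    recorder.record(MutatingCommand.SliceUpdateShardState, shardStateMessage);
    recorder.executeStateUpdates(solrCloudManager, zkStateReader);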
/**
- * This class handles the changes to be made as a result of a {@link OverseerAction#DOWNNODE} event.<p>
+ * This class handles the changes to be made as a result of a {@link OverseerAction#DOWNNODE}
+ * event.
*
- * Instances of this class deal with a single collection. Static method {@link #executeNodeDownStateUpdate} is the entry point
- * dealing with a node going down and processing all collections.
+ * <p>Instances of this class deal with a single collection. Static method {@link
+ * #executeNodeDownStateUpdate} is the entry point dealing with a node going down and processing
+ * all collections.
*/
private static class CollectionNodeDownChangeCalculator implements StateChangeCalculator {
private final String collectionName;
@@ -747,36 +887,44 @@ public class DistributedClusterStateUpdater {
private List<PerReplicaStatesOps> replicaOpsList = null;
/**
- * Entry point to mark all replicas of all collections present on a single node as being DOWN (because the node is down)
+ * Entry point to mark all replicas of all collections present on a single node as being DOWN
+ * (because the node is down)
*/
public static void executeNodeDownStateUpdate(String nodeName, ZkStateReader zkStateReader) {
- // This code does a version of what NodeMutator.downNode() is doing. We can't assume we have a cache of the collections,
- // so we're going to read all of them from ZK, fetch the state.json for each and if it has any replicas on the
- // failed node, do an update (conditional of course) of the state.json
+ // This code does a version of what NodeMutator.downNode() is doing. We can't assume we have a
+ // cache of the collections, so we're going to read all of them from ZK, fetch the state.json
+ // for each and if it has any replicas on the failed node, do an update (conditional of
+ // course) of the state.json
- // For Per Replica States collections there is still a need to read state.json, but the update of state.json is replaced
- // by a few znode deletions and creations. Might be faster or slower overall, depending on the number of impacted
- // replicas of such a collection and the total size of that collection's state.json.
+ // For Per Replica States collections there is still a need to read state.json, but the update
+ // of state.json is replaced by a few znode deletions and creations. Might be faster or slower
+ // overall, depending on the number of impacted replicas of such a collection and the total
+ // size of that collection's state.json.
- // Note code here also has to duplicate some of the work done in ZkStateReader because ZkStateReader couples reading of
- // the cluster state and maintaining a cached copy of the cluster state. Something likely to be refactored later (once
- // Overseer is totally removed and Zookeeper access patterns become clearer).
+ // Note code here also has to duplicate some of the work done in ZkStateReader because
+ // ZkStateReader couples reading of the cluster state and maintaining a cached copy of the
+ // cluster state. Something likely to be refactored later (once Overseer is totally removed
+ // and Zookeeper access patterns become clearer).
log.debug("DownNode state change invoked for node: {}", nodeName);
try {
- final List<String> collectionNames = zkStateReader.getZkClient().getChildren(COLLECTIONS_ZKNODE, null, true);
+ final List<String> collectionNames =
+ zkStateReader.getZkClient().getChildren(COLLECTIONS_ZKNODE, null, true);
- // Collections are totally independent of each other. Multiple threads could share the load here (need a ZK connection for each though).
+ // Collections are totally independent of each other. Multiple threads could share the load
+ // here (need a ZK connection for each though).
for (String collectionName : collectionNames) {
- CollectionNodeDownChangeCalculator collectionUpdater = new CollectionNodeDownChangeCalculator(collectionName, nodeName);
+ CollectionNodeDownChangeCalculator collectionUpdater =
+ new CollectionNodeDownChangeCalculator(collectionName, nodeName);
ZkUpdateApplicator.applyUpdate(zkStateReader, collectionUpdater);
}
} catch (Exception e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
- // Overseer behavior is to log an error and carry on when a message fails. See Overseer.ClusterStateUpdater.processQueueItem()
+ // Overseer behavior is to log an error and carry on when a message fails. See
+ // Overseer.ClusterStateUpdater.processQueueItem()
log.error("Could not successfully process DOWNNODE, giving up", e);
}
}
@@ -799,17 +947,30 @@ public class DistributedClusterStateUpdater {
@Override
public void computeUpdates(ClusterState clusterState, SolrZkClient client) {
final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
- Optional<ZkWriteCommand> result = docCollection != null ? NodeMutator.computeCollectionUpdate(nodeName, collectionName, docCollection, client) : Optional.empty();
+ Optional<ZkWriteCommand> result =
+ docCollection != null
+ ? NodeMutator.computeCollectionUpdate(nodeName, collectionName, docCollection, client)
+ : Optional.empty();
if (docCollection == null) {
- // This is possible but should be rare. Logging warn in case it is seen often and likely a sign of another issue
- log.warn("Processing DOWNNODE, collection " + collectionName + " disappeared during iteration"); // nowarn
+ // This is possible but should be rare. Logging a warning in case it is seen often, which is
+ // likely a sign of another issue.
+ log.warn(
+ "Processing DOWNNODE, collection "
+ + collectionName
+ + " disappeared during iteration"); // nowarn
}
if (result.isPresent()) {
ZkWriteCommand zkcmd = result.get();
- computedState = (zkcmd != ZkStateWriter.NO_OP) ? clusterState.copyWith(zkcmd.name, zkcmd.collection) : null;
- replicaOpsList = (zkcmd.ops != null && zkcmd.ops.get() != null) ? Collections.singletonList(zkcmd.ops) : null;
+ computedState =
+ (zkcmd != ZkStateWriter.NO_OP)
+ ? clusterState.copyWith(zkcmd.name, zkcmd.collection)
+ : null;
+ replicaOpsList =
+ (zkcmd.ops != null && zkcmd.ops.get() != null)
+ ? Collections.singletonList(zkcmd.ops)
+ : null;
} else {
computedState = null;
replicaOpsList = null;
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedCollectionLockFactory.java b/solr/core/src/java/org/apache/solr/cloud/DistributedCollectionLockFactory.java
index d87dafc..e49aba6 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedCollectionLockFactory.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedCollectionLockFactory.java
@@ -22,32 +22,45 @@ import org.apache.solr.common.params.CollectionParams;
public interface DistributedCollectionLockFactory {
/**
- * Create a new lock of the specified type (read or write) entering the "competition" for actually getting the lock at
- * the given level for the given path i.e. a lock at {@code collName} or a lock at {@code collName/shardId} or a lock
- * at {@code collName/shardId/replicaName}, depending on the passed {@code level}.<p>
+ * Create a new lock of the specified type (read or write) entering the "competition" for actually
+ * getting the lock at the given level for the given path i.e. a lock at {@code collName} or a
+ * lock at {@code collName/shardId} or a lock at {@code collName/shardId/replicaName}, depending
+ * on the passed {@code level}.
*
- * The paths are used to define which locks compete with each other (locks of equal paths compete).<p>
+ * <p>The paths are used to define which locks compete with each other (locks of equal paths
+ * compete).
*
- * Upon return from this call, the lock <b>has not been acquired</b> but the it had entered the lock acquiring "competition",
- * and the caller can decide to wait until the lock is granted by calling {@link DistributedLock#waitUntilAcquired()}.<br>
- * Separating the lock creation from lock acquisition allows a more deterministic release of the locks when/if they can't be
- * acquired.<p>
+ * <p>Upon return from this call, the lock <b>has not been acquired</b> but it has entered the
+ * lock acquiring "competition", and the caller can decide to wait until the lock is granted by
+ * calling {@link DistributedLock#waitUntilAcquired()}.<br>
+ * Separating the lock creation from lock acquisition allows a more deterministic release of the
+ * locks when/if they can't be acquired.
+ *
+ * <p>Locks at different paths are independent of each other, multiple {@link DistributedLock} are
+ * therefore requested for a single operation and are packaged together and returned as an {@link
+ * DistributedMultiLock}, see {@link CollectionApiLockFactory#createCollectionApiLock}.
*
- * Locks at different paths are independent of each other, multiple {@link DistributedLock} are therefore requested for
- * a single operation and are packaged together and returned as an {@link DistributedMultiLock},
- * see {@link CollectionApiLockFactory#createCollectionApiLock}.
* @param isWriteLock {@code true} if requesting a write lock, {@code false} for a read lock.
* @param level The requested locking level. Can be one of:
- * <ul><li>{@link org.apache.solr.common.params.CollectionParams.LockLevel#COLLECTION}</li>
- * <li>{@link org.apache.solr.common.params.CollectionParams.LockLevel#SHARD}</li>
- * <li>{@link org.apache.solr.common.params.CollectionParams.LockLevel#REPLICA}</li></ul>
+ * <ul>
+ * <li>{@link org.apache.solr.common.params.CollectionParams.LockLevel#COLLECTION}
+ * <li>{@link org.apache.solr.common.params.CollectionParams.LockLevel#SHARD}
+ * <li>{@link org.apache.solr.common.params.CollectionParams.LockLevel#REPLICA}
+ * </ul>
+ *
* @param collName the collection name, can never be {@code null} as is needed for all locks.
- * @param shardId is ignored and can be {@code null} if {@code level} is {@link org.apache.solr.common.params.CollectionParams.LockLevel#COLLECTION}
- * @param replicaName is ignored and can be {@code null} if {@code level} is {@link org.apache.solr.common.params.CollectionParams.LockLevel#COLLECTION}
- * or {@link org.apache.solr.common.params.CollectionParams.LockLevel#SHARD}
+ * @param shardId is ignored and can be {@code null} if {@code level} is {@link
+ * org.apache.solr.common.params.CollectionParams.LockLevel#COLLECTION}
+ * @param replicaName is ignored and can be {@code null} if {@code level} is {@link
+ * org.apache.solr.common.params.CollectionParams.LockLevel#COLLECTION} or {@link
+ * org.apache.solr.common.params.CollectionParams.LockLevel#SHARD}
* @return a lock instance that must be {@link DistributedLock#release()}'ed in a {@code finally},
- * regardless of the lock having been acquired or not.
+ * regardless of the lock having been acquired or not.
*/
- DistributedLock createLock(boolean isWriteLock, CollectionParams.LockLevel level, String collName, String shardId,
- String replicaName);
+ DistributedLock createLock(
+ boolean isWriteLock,
+ CollectionParams.LockLevel level,
+ String collName,
+ String shardId,
+ String replicaName);
}
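The contract documented above (creating the lock only enters the competition, and the lock must be released in a finally block whether or not it was ever acquired) suggests a usage pattern along these lines; the factory instance and the chosen level are illustrative:

    // Illustrative use of the factory contract for a shard-level write lock.
    DistributedLock lock =
        factory.createLock(true, CollectionParams.LockLevel.SHARD, "myCollection", "shard1", null);
    try {
      lock.waitUntilAcquired();   // blocks until this lock wins the competition
      // ... do the shard-level work while holding the lock ...
    } finally {
      lock.release();             // must run whether or not the lock was acquired
    }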
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedConfigSetLockFactory.java b/solr/core/src/java/org/apache/solr/cloud/DistributedConfigSetLockFactory.java
index 4f27527..dcead43 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedConfigSetLockFactory.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedConfigSetLockFactory.java
@@ -19,23 +19,25 @@ package org.apache.solr.cloud;
public interface DistributedConfigSetLockFactory {
/**
- * Create a new lock of the specified type (read or write) entering the "competition" for actually getting the lock for
- * the {@code configSetName}<p>
+ * Create a new lock of the specified type (read or write) entering the "competition" for actually
+ * getting the lock for the {@code configSetName}
*
- * Upon return from this call, the lock <b>has not been acquired</b> but the it had entered the lock acquiring "competition",
- * and the caller can decide to wait until the lock is granted by calling {@link DistributedLock#waitUntilAcquired()}.<br>
- * Separating the lock creation from lock acquisition allows a more deterministic release of the locks when/if they can't be
- * acquired.<p>
+ * <p>Upon return from this call, the lock <b>has not been acquired</b> but it has entered the
+ * lock acquiring "competition", and the caller can decide to wait until the lock is granted by
+ * calling {@link DistributedLock#waitUntilAcquired()}.<br>
+ * Separating the lock creation from lock acquisition allows a more deterministic release of the
+ * locks when/if they can't be acquired.
*
- * Locks at different paths are independent of each other, multiple {@link DistributedLock} are therefore requested for
- * a single operation and are packaged together and returned as an {@link DistributedMultiLock},
- * see {@link org.apache.solr.cloud.api.collections.CollectionApiLockFactory#createCollectionApiLock} or
+ * <p>Locks at different paths are independent of each other, multiple {@link DistributedLock} are
+ * therefore requested for a single operation and are packaged together and returned as an {@link
+ * DistributedMultiLock}, see {@link
+ * org.apache.solr.cloud.api.collections.CollectionApiLockFactory#createCollectionApiLock} or
* {@link org.apache.solr.cloud.ConfigSetApiLockFactory#createConfigSetApiLock}.
*
* @param isWriteLock {@code true} if requesting a write lock, {@code false} for a read lock.
* @param configSetName the config set name, can never be {@code null}.
* @return a lock instance that must be {@link DistributedLock#release()}'ed in a {@code finally},
- * regardless of the lock having been acquired or not.
+ * regardless of the lock having been acquired or not.
*/
DistributedLock createLock(boolean isWriteLock, String configSetName);
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedLock.java b/solr/core/src/java/org/apache/solr/cloud/DistributedLock.java
index de618e7..1929766 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedLock.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedLock.java
@@ -17,11 +17,11 @@
package org.apache.solr.cloud;
-/**
- * A lock that can be used across cluster nodes.
- */
+/** A lock that can be used across cluster nodes. */
public interface DistributedLock {
void waitUntilAcquired();
+
void release();
+
boolean isAcquired();
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java b/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
index c9f12e9..ece2589 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
@@ -29,9 +29,8 @@ import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
/**
- * A distributed map.
- * This supports basic map functions e.g. get, put, contains for interaction with zk which
- * don't have to be ordered i.e. DistributedQueue.
+ * A distributed map. This supports basic map functions (e.g. get, put, contains) for interaction
+ * with zk which, unlike DistributedQueue, don't have to be ordered.
*/
public class DistributedMap {
protected final String dir;
@@ -56,18 +55,21 @@ public class DistributedMap {
this.zookeeper = zookeeper;
}
-
public void put(String trackingId, byte[] data) throws KeeperException, InterruptedException {
- zookeeper.makePath(dir + "/" + PREFIX + trackingId, data, CreateMode.PERSISTENT, null, false, true);
+ zookeeper.makePath(
+ dir + "/" + PREFIX + trackingId, data, CreateMode.PERSISTENT, null, false, true);
}
-
+
/**
* Puts an element in the map only if there isn't one with the same trackingId already
+ *
+ * @return True if the element was added. False if it wasn't (because the key already exists)
*/
- public boolean putIfAbsent(String trackingId, byte[] data) throws KeeperException, InterruptedException {
+ public boolean putIfAbsent(String trackingId, byte[] data)
+ throws KeeperException, InterruptedException {
try {
- zookeeper.makePath(dir + "/" + PREFIX + trackingId, data, CreateMode.PERSISTENT, null, true, true);
+ zookeeper.makePath(
+ dir + "/" + PREFIX + trackingId, data, CreateMode.PERSISTENT, null, true, true);
return true;
} catch (NodeExistsException e) {
return false;
@@ -89,9 +91,8 @@ public class DistributedMap {
}
/**
- * return true if the znode was successfully deleted
- * false if the node didn't exist and therefore not deleted
- * exception an exception occurred while deleting
+ * Returns true if the znode was successfully deleted, false if the node didn't exist (and was
+ * therefore not deleted), and throws an exception if an error occurred while deleting.
*/
public boolean remove(String trackingId) throws KeeperException, InterruptedException {
try {
@@ -102,26 +103,19 @@ public class DistributedMap {
return true;
}
- /**
- * Helper method to clear all child nodes for a parent node.
- */
+ /** Helper method to clear all child nodes for a parent node. */
public void clear() throws KeeperException, InterruptedException {
List<String> childNames = zookeeper.getChildren(dir, null, true);
- for(String childName: childNames) {
+ for (String childName : childNames) {
zookeeper.delete(dir + "/" + childName, -1, true);
}
-
}
-
- /**
- * Returns the keys of all the elements in the map
- */
+
+ /** Returns the keys of all the elements in the map */
public Collection<String> keys() throws KeeperException, InterruptedException {
List<String> childs = zookeeper.getChildren(dir, null, true);
final List<String> ids = new ArrayList<>(childs.size());
childs.stream().forEach((child) -> ids.add(child.substring(PREFIX.length())));
return ids;
-
}
-
}
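A short illustrative use of the map operations above, assuming an already constructed DistributedMap (the construction is not shown here) and arbitrary trackingId/payload values:

    // putIfAbsent() only creates the znode when no entry exists yet for this trackingId.
    if (map.putIfAbsent(trackingId, payload)) {
      // First writer for this trackingId wins.
    } else {
      // An entry already existed; nothing was written.
    }
    boolean removed = map.remove(trackingId); // false if the znode did not exist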
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedMultiLock.java b/solr/core/src/java/org/apache/solr/cloud/DistributedMultiLock.java
index d0fe596..9979c14 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedMultiLock.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedMultiLock.java
@@ -17,17 +17,16 @@
package org.apache.solr.cloud;
+import com.google.common.annotations.VisibleForTesting;
import java.lang.invoke.MethodHandles;
import java.util.List;
-
-import com.google.common.annotations.VisibleForTesting;
import org.apache.solr.common.SolrException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * A lock as acquired for running a single API command (Collection or Config Set or anything else in the future).
- * Internally it is composed of multiple {@link DistributedLock}'s.
+ * A lock acquired for running a single API command (Collection or Config Set or anything else in
+ * the future). Internally it is composed of multiple {@link DistributedLock}'s.
*/
public class DistributedMultiLock {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -40,7 +39,8 @@ public class DistributedMultiLock {
public void waitUntilAcquired() {
if (isReleased) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Released lock can't be waited upon");
+ throw new SolrException(
+ SolrException.ErrorCode.SERVER_ERROR, "Released lock can't be waited upon");
}
for (DistributedLock lock : locks) {
@@ -59,7 +59,8 @@ public class DistributedMultiLock {
public boolean isAcquired() {
if (isReleased) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Released lock can't be tested");
+ throw new SolrException(
+ SolrException.ErrorCode.SERVER_ERROR, "Released lock can't be tested");
}
for (DistributedLock lock : locks) {
if (!lock.isAcquired()) {
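The contract of DistributedMultiLock is small: the composite lock is held only when every member lock is held, and once released it may no longer be waited on or queried. A self-contained, hypothetical sketch of that shape (MemberLock stands in for Solr's DistributedLock and is an assumption of this example):

import java.util.List;

// Hypothetical composite lock: acquired only when every member lock is acquired.
class MultiLockSketch {
  interface MemberLock {
    boolean isAcquired();

    void waitUntilAcquired() throws InterruptedException;

    void release();
  }

  private final List<MemberLock> locks;
  private volatile boolean released = false;

  MultiLockSketch(List<MemberLock> locks) {
    this.locks = locks;
  }

  void waitUntilAcquired() throws InterruptedException {
    if (released) throw new IllegalStateException("Released lock can't be waited upon");
    for (MemberLock lock : locks) {
      lock.waitUntilAcquired(); // block until this member lock is held
    }
  }

  boolean isAcquired() {
    if (released) throw new IllegalStateException("Released lock can't be tested");
    for (MemberLock lock : locks) {
      if (!lock.isAcquired()) return false;
    }
    return true;
  }

  void release() {
    released = true;
    for (MemberLock lock : locks) {
      lock.release();
    }
  }
}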
diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
index 1398570..dd89d4c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
@@ -35,8 +35,12 @@ public abstract class ElectionContext implements Closeable {
volatile String leaderSeqPath;
private SolrZkClient zkClient;
- public ElectionContext(final String coreNodeName,
- final String electionPath, final String leaderPath, final ZkNodeProps leaderProps, final SolrZkClient zkClient) {
+ public ElectionContext(
+ final String coreNodeName,
+ final String electionPath,
+ final String leaderPath,
+ final ZkNodeProps leaderProps,
+ final SolrZkClient zkClient) {
assert zkClient != null;
this.id = coreNodeName;
this.electionPath = electionPath;
@@ -44,11 +48,9 @@ public abstract class ElectionContext implements Closeable {
this.leaderProps = leaderProps;
this.zkClient = zkClient;
}
-
- public void close() {
- }
-
+ public void close() {}
+
public void cancelElection() throws InterruptedException, KeeperException {
if (leaderSeqPath != null) {
try {
@@ -63,15 +65,14 @@ public abstract class ElectionContext implements Closeable {
}
}
- abstract void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs) throws KeeperException, InterruptedException, IOException;
+ abstract void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs)
+ throws KeeperException, InterruptedException, IOException;
public void checkIfIamLeaderFired() {}
public void joinedElectionFired() {}
- public ElectionContext copy(){
+ public ElectionContext copy() {
throw new UnsupportedOperationException("copy");
}
}
-
-
diff --git a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
index b7199ee..e4335f1 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
@@ -17,6 +17,10 @@
package org.apache.solr.cloud;
+import static org.apache.solr.cloud.api.collections.CollectionHandlingUtils.ONLY_ACTIVE_NODES;
+import static org.apache.solr.cloud.api.collections.CollectionHandlingUtils.SHARD_UNIQUE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
+
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.HashMap;
@@ -27,7 +31,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
-
import org.apache.commons.lang3.StringUtils;
import org.apache.solr.cloud.overseer.ClusterStateMutator;
import org.apache.solr.cloud.overseer.CollectionMutator;
@@ -43,11 +46,8 @@ import org.apache.solr.common.params.CollectionAdminParams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.cloud.api.collections.CollectionHandlingUtils.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.api.collections.CollectionHandlingUtils.SHARD_UNIQUE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
-
-// Class to encapsulate processing replica properties that have at most one replica hosting a property per slice.
+// Class to encapsulate processing replica properties that have at most one replica hosting a
+// property per slice.
class ExclusiveSliceProperty {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private ClusterState clusterState;
@@ -56,12 +56,14 @@ class ExclusiveSliceProperty {
private final DocCollection collection;
private final String collectionName;
- // Key structure. For each node, list all replicas on it regardless of whether they have the property or not.
+ // Key structure. For each node, list all replicas on it regardless of whether they have the
+ // property or not.
private final Map<String, List<SliceReplica>> nodesHostingReplicas = new HashMap<>();
// Key structure. For each node, a list of the replicas _currently_ hosting the property.
private final Map<String, List<SliceReplica>> nodesHostingProp = new HashMap<>();
Set<String> shardsNeedingHosts = new HashSet<>();
- Map<String, Slice> changedSlices = new HashMap<>(); // Work on copies rather than the underlying cluster state.
+ // Work on copies rather than the underlying cluster state.
+ Map<String, Slice> changedSlices = new HashMap<>();
private int origMaxPropPerNode = 0;
private int origModulo = 0;
@@ -81,29 +83,43 @@ class ExclusiveSliceProperty {
collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
if (StringUtils.isBlank(collectionName) || StringUtils.isBlank(property)) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "Overseer '" + message.getStr(Overseer.QUEUE_OPERATION) + "' requires both the '" + ZkStateReader.COLLECTION_PROP + "' and '" +
- ZkStateReader.PROPERTY_PROP + "' parameters. No action taken ");
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Overseer '"
+ + message.getStr(Overseer.QUEUE_OPERATION)
+ + "' requires both the '"
+ + ZkStateReader.COLLECTION_PROP
+ + "' and '"
+ + ZkStateReader.PROPERTY_PROP
+ + "' parameters. No action taken ");
}
Boolean shardUnique = Boolean.parseBoolean(message.getStr(SHARD_UNIQUE));
- if (shardUnique == false &&
- SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(this.property) == false) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
- + " the property be a pre-defined property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true' " +
- " Property: " + this.property + " shardUnique: " + Boolean.toString(shardUnique));
+ if (shardUnique == false
+ && SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(this.property) == false) {
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Balancing properties amongst replicas in a slice requires that"
+ + " the property be a pre-defined property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true' "
+ + " Property: "
+ + this.property
+ + " shardUnique: "
+ + Boolean.toString(shardUnique));
}
collection = clusterState.getCollection(collectionName);
if (collection == null) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "Could not find collection ' " + collectionName + "' for overseer operation '" +
- message.getStr(Overseer.QUEUE_OPERATION) + "'. No action taken.");
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Could not find collection ' "
+ + collectionName
+ + "' for overseer operation '"
+ + message.getStr(Overseer.QUEUE_OPERATION)
+ + "'. No action taken.");
}
onlyActiveNodes = Boolean.parseBoolean(message.getStr(ONLY_ACTIVE_NODES, "true"));
}
-
DocCollection getDocCollection() {
return collection;
}
@@ -112,12 +128,12 @@ class ExclusiveSliceProperty {
return replica.getState() == Replica.State.ACTIVE;
}
- // Collect a list of all the nodes that _can_ host the indicated property. Along the way, also collect any of
- // the replicas on that node that _already_ host the property as well as any slices that do _not_ have the
- // property hosted.
+ // Collect a list of all the nodes that _can_ host the indicated property. Along the way, also
+ // collect any of the replicas on that node that _already_ host the property as well as any slices
+ // that do _not_ have the property hosted.
//
- // Return true if anything node needs it's property reassigned. False if the property is already balanced for
- // the collection.
+ // Return true if any node needs its property reassigned. False if the property is already
+ // balanced for the collection.
private boolean collectCurrentPropStats() {
int maxAssigned = 0;
@@ -129,7 +145,8 @@ class ExclusiveSliceProperty {
for (Replica replica : slice.getReplicas()) {
if (onlyActiveNodes && isActive(replica) == false) {
if (StringUtils.isNotBlank(replica.getStr(property))) {
- removeProp(slice, replica.getName()); // Note, we won't be committing this to ZK until later.
+ // Note, we won't be committing this to ZK until later.
+ removeProp(slice, replica.getName());
}
continue;
}
@@ -137,9 +154,12 @@ class ExclusiveSliceProperty {
String nodeName = replica.getNodeName();
if (StringUtils.isNotBlank(replica.getStr(property))) {
if (sliceHasProp) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "'" + BALANCESHARDUNIQUE + "' should only be called for properties that have at most one member " +
- "in any slice with the property set. No action taken.");
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "'"
+ + BALANCESHARDUNIQUE
+ + "' should only be called for properties that have at most one member "
+ + "in any slice with the property set. No action taken.");
}
if (nodesHostingProp.containsKey(nodeName) == false) {
nodesHostingProp.put(nodeName, new ArrayList<>());
@@ -157,24 +177,26 @@ class ExclusiveSliceProperty {
}
// If the total number of already-hosted properties assigned to nodes
- // that have potential to host leaders is equal to the slice count _AND_ none of the current nodes has more than
- // the max number of properties, there's nothing to do.
+ // that have potential to host leaders is equal to the slice count _AND_ none of the current
+ // nodes has more than the max number of properties, there's nothing to do.
origMaxPropPerNode = collection.getSlices().size() / allHosts.size();
// Some nodes can have one more of the property if the numbers aren't exactly even.
origModulo = collection.getSlices().size() % allHosts.size();
if (origModulo > 0) {
- origMaxPropPerNode++; // have to have some nodes with 1 more property.
+ origMaxPropPerNode++; // have to have some nodes with 1 more property.
}
- // We can say for sure that we need to rebalance if we don't have as many assigned properties as slices.
+ // We can say for sure that we need to rebalance if we don't have as many assigned properties as
+ // slices.
if (assigned != collection.getSlices().size()) {
return true;
}
// Make sure there are no more slices at the limit than the "leftovers"
- // Let's say there's 7 slices and 3 nodes. We need to distribute the property as 3 on node1, 2 on node2 and 2 on node3
- // (3, 2, 2) We need to be careful to not distribute them as 3, 3, 1. that's what this check is all about.
+ // Let's say there are 7 slices and 3 nodes. We need to distribute the property as 3 on node1, 2
+ // on node2 and 2 on node3 (3, 2, 2). We need to be careful not to distribute them as 3, 3, 1;
+ // that's what this check is all about.
int counter = origModulo;
for (List<SliceReplica> list : nodesHostingProp.values()) {
if (list.size() == origMaxPropPerNode) --counter;
@@ -190,8 +212,7 @@ class ExclusiveSliceProperty {
ListIterator<SliceReplica> iter = entReplica.getValue().listIterator();
while (iter.hasNext()) {
SliceReplica sr = iter.next();
- if (sr.slice.getName().equals(sliceName))
- iter.remove();
+ if (sr.slice.getName().equals(sliceName)) iter.remove();
}
}
}
@@ -208,8 +229,8 @@ class ExclusiveSliceProperty {
SliceReplica srToChange = null;
for (String slice : shardsNeedingHosts) {
for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingReplicas.entrySet()) {
- // A little tricky. If we don't set this to something below, then it means all possible places to
- // put this property are full up, so just put it somewhere.
+ // A little tricky. If we don't set this to something below, then it means all possible
+ // places to put this property are full up, so just put it somewhere.
if (srToChange == null && ent.getValue().size() > 0) {
srToChange = ent.getValue().get(0);
}
@@ -222,7 +243,8 @@ class ExclusiveSliceProperty {
if (nodesHostingProp.containsKey(ent.getKey()) == false) {
nodesHostingProp.put(ent.getKey(), new ArrayList<SliceReplica>());
}
- if (minSize > nodesHostingReplicas.get(ent.getKey()).size() && nodesHostingProp.get(ent.getKey()).size() < tmpMaxPropPerNode) {
+ if (minSize > nodesHostingReplicas.get(ent.getKey()).size()
+ && nodesHostingProp.get(ent.getKey()).size() < tmpMaxPropPerNode) {
minSize = nodesHostingReplicas.get(ent.getKey()).size();
srToChange = sr;
nodeName = ent.getKey();
@@ -260,12 +282,13 @@ class ExclusiveSliceProperty {
--tmpModulo;
if (tmpModulo == 0) {
--tmpMaxPropPerNode;
- --tmpModulo; // Prevent dropping tmpMaxPropPerNode again.
+ --tmpModulo; // Prevent dropping tmpMaxPropPerNode again.
}
}
}
- // Go through the list of presently-hosted properties and remove any that have too many replicas that host the property
+ // Go through the list of presently-hosted properties and remove any that have too many replicas
+ // that host the property
private void removeOverallocatedReplicas() {
tmpMaxPropPerNode = origMaxPropPerNode; // A bit clumsy, but don't want to duplicate code.
tmpModulo = origModulo;
@@ -280,14 +303,19 @@ class ExclusiveSliceProperty {
private void removeProp(Slice origSlice, String replicaName) {
if (log.isDebugEnabled()) {
- log.debug("Removing property {} from slice {}, replica {}", property, origSlice.getName(), replicaName);
+ log.debug(
+ "Removing property {} from slice {}, replica {}",
+ property,
+ origSlice.getName(),
+ replicaName);
}
getReplicaFromChanged(origSlice, replicaName).getProperties().remove(property);
}
private void addProp(Slice origSlice, String replicaName) {
if (log.isDebugEnabled()) {
- log.debug("Adding property {} to slice {}, replica {}", property, origSlice.getName(), replicaName);
+ log.debug(
+ "Adding property {} to slice {}, replica {}", property, origSlice.getName(), replicaName);
}
getReplicaFromChanged(origSlice, replicaName).getProperties().put(property, "true");
}
@@ -300,18 +328,28 @@ class ExclusiveSliceProperty {
if (newSlice != null) {
replica = newSlice.getReplica(replicaName);
} else {
- newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy(), origSlice.collection);
+ newSlice =
+ new Slice(
+ origSlice.getName(),
+ origSlice.getReplicasCopy(),
+ origSlice.shallowCopy(),
+ origSlice.collection);
changedSlices.put(origSlice.getName(), newSlice);
replica = newSlice.getReplica(replicaName);
}
if (replica == null) {
- throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Should have been able to find replica '" +
- replicaName + "' in slice '" + origSlice.getName() + "'. No action taken");
+ throw new SolrException(
+ SolrException.ErrorCode.INVALID_STATE,
+ "Should have been able to find replica '"
+ + replicaName
+ + "' in slice '"
+ + origSlice.getName()
+ + "'. No action taken");
}
return replica;
-
}
- // Main entry point for carrying out the action. Returns "true" if we have actually moved properties around.
+ // Main entry point for carrying out the action. Returns "true" if we have actually moved
+ // properties around.
boolean balanceProperty() {
if (collectCurrentPropStats() == false) {
@@ -325,8 +363,8 @@ class ExclusiveSliceProperty {
// So, remove a replica from the nodes that have too many
removeOverallocatedReplicas();
- // prune replicas belonging to a slice that have the property currently assigned from the list of replicas
- // that could host the property.
+ // prune replicas belonging to a slice that have the property currently assigned from the list
+ // of replicas that could host the property.
for (Map.Entry<String, List<SliceReplica>> entProp : nodesHostingProp.entrySet()) {
for (SliceReplica srHosting : entProp.getValue()) {
removeSliceAlreadyHostedFromPossibles(srHosting.slice.getName());
@@ -342,12 +380,14 @@ class ExclusiveSliceProperty {
}
}
- // At this point, nodesHostingProp should contain _only_ lists of replicas that belong to slices that do _not_
- // have any replica hosting the property. So let's assign them.
+ // At this point, nodesHostingProp should contain _only_ lists of replicas that belong to slices
+ // that do _not_ have any replica hosting the property. So let's assign them.
balanceUnassignedReplicas();
for (Slice newSlice : changedSlices.values()) {
- DocCollection docCollection = CollectionMutator.updateSlice(collectionName, clusterState.getCollection(collectionName), newSlice);
+ DocCollection docCollection =
+ CollectionMutator.updateSlice(
+ collectionName, clusterState.getCollection(collectionName), newSlice);
clusterState = ClusterStateMutator.newState(clusterState, collectionName, docCollection);
}
return true;
@@ -361,9 +401,17 @@ class ExclusiveSliceProperty {
this.slice = slice;
this.replica = replica;
}
+
public String toString() {
StringBuilder sb = new StringBuilder(System.lineSeparator()).append(System.lineSeparator());
- sb.append(" :").append(System.lineSeparator()).append("slice: ").append(slice.toString()).append(System.lineSeparator()).append(" replica: ").append(replica.toString()).append(System.lineSeparator());
+ sb.append(" :")
+ .append(System.lineSeparator())
+ .append("slice: ")
+ .append(slice.toString())
+ .append(System.lineSeparator())
+ .append(" replica: ")
+ .append(replica.toString())
+ .append(System.lineSeparator());
return sb.toString();
}
}
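The per-node quota arithmetic referenced in the comments above (origMaxPropPerNode and origModulo) is easiest to see with concrete numbers. A standalone sketch, not taken from the Solr sources, using the 7-slices-on-3-nodes example from the comment:

// Standalone illustration of the property-per-node quota arithmetic.
class PropertyQuotaSketch {
  public static void main(String[] args) {
    int slices = 7;
    int hosts = 3;

    int maxPropPerNode = slices / hosts; // 7 / 3 = 2
    int modulo = slices % hosts; // 7 % 3 = 1
    if (modulo > 0) {
      maxPropPerNode++; // some nodes must carry one extra property -> 3
    }

    // Target shape: `modulo` nodes at maxPropPerNode and the rest one below,
    // i.e. 7 slices over 3 nodes -> (3, 2, 2), never (3, 3, 1).
    System.out.println("maxPropPerNode=" + maxPropPerNode + ", nodesAtMax=" + modulo);
  }
}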
diff --git a/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java b/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
index e55ce2b..f5a3eba 100644
--- a/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
+++ b/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
@@ -25,7 +25,6 @@ import java.util.Map;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-
import org.apache.solr.cloud.ZkController.ContextKey;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.SolrException;
@@ -42,45 +41,43 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Leader Election process. This class contains the logic by which a
- * leader is chosen. First call {@link #setup(ElectionContext)} to ensure
- * the election process is init'd. Next call
- * {@link #joinElection(ElectionContext, boolean)} to start the leader election.
- *
- * The implementation follows the classic ZooKeeper recipe of creating an
- * ephemeral, sequential node for each candidate and then looking at the set
- * of such nodes - if the created node is the lowest sequential node, the
- * candidate that created the node is the leader. If not, the candidate puts
- * a watch on the next lowest node it finds, and if that node goes down,
- * starts the whole process over by checking if it's the lowest sequential node, etc.
- *
+ * Leader Election process. This class contains the logic by which a leader is chosen. First call
+ * {@link #setup(ElectionContext)} to ensure the election process is init'd. Next call {@link
+ * #joinElection(ElectionContext, boolean)} to start the leader election.
+ *
+ * <p>The implementation follows the classic ZooKeeper recipe of creating an ephemeral, sequential
+ * node for each candidate and then looking at the set of such nodes - if the created node is the
+ * lowest sequential node, the candidate that created the node is the leader. If not, the candidate
+ * puts a watch on the next lowest node it finds, and if that node goes down, starts the whole
+ * process over by checking if it's the lowest sequential node, etc.
*/
-public class LeaderElector {
+public class LeaderElector {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
+
static final String ELECTION_NODE = "/election";
-
- public final static Pattern LEADER_SEQ = Pattern.compile(".*?/?.*?-n_(\\d+)");
- private final static Pattern SESSION_ID = Pattern.compile(".*?/?(.*?-.*?)-n_\\d+");
- private final static Pattern NODE_NAME = Pattern.compile(".*?/?(.*?-)(.*?)-n_\\d+");
+
+ public static final Pattern LEADER_SEQ = Pattern.compile(".*?/?.*?-n_(\\d+)");
+ private static final Pattern SESSION_ID = Pattern.compile(".*?/?(.*?-.*?)-n_\\d+");
+ private static final Pattern NODE_NAME = Pattern.compile(".*?/?(.*?-)(.*?)-n_\\d+");
protected SolrZkClient zkClient;
-
+
private ZkCmdExecutor zkCmdExecutor;
private volatile ElectionContext context;
private ElectionWatcher watcher;
- private Map<ContextKey,ElectionContext> electionContexts;
+ private Map<ContextKey, ElectionContext> electionContexts;
private ContextKey contextKey;
public LeaderElector(SolrZkClient zkClient) {
this.zkClient = zkClient;
zkCmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
}
-
- public LeaderElector(SolrZkClient zkClient, ContextKey key, Map<ContextKey,ElectionContext> electionContexts) {
+
+ public LeaderElector(
+ SolrZkClient zkClient, ContextKey key, Map<ContextKey, ElectionContext> electionContexts) {
this.zkClient = zkClient;
zkCmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
this.electionContexts = electionContexts;
@@ -92,22 +89,22 @@ public class LeaderElector {
}
/**
- * Check if the candidate with the given n_* sequence number is the leader.
- * If it is, set the leaderId on the leader zk node. If it is not, start
- * watching the candidate that is in line before this one - if it goes down, check
- * if this candidate is the leader again.
+ * Check if the candidate with the given n_* sequence number is the leader. If it is, set the
+ * leaderId on the leader zk node. If it is not, start watching the candidate that is in line
+ * before this one - if it goes down, check if this candidate is the leader again.
*
* @param replacement has someone else been the leader already?
*/
- private void checkIfIamLeader(final ElectionContext context, boolean replacement) throws KeeperException,
- InterruptedException, IOException {
+ private void checkIfIamLeader(final ElectionContext context, boolean replacement)
+ throws KeeperException, InterruptedException, IOException {
context.checkIfIamLeaderFired();
// get all other numbers...
final String holdElectionPath = context.electionPath + ELECTION_NODE;
List<String> seqs = zkClient.getChildren(holdElectionPath, null, true);
sortSeqs(seqs);
- String leaderSeqNodeName = context.leaderSeqPath.substring(context.leaderSeqPath.lastIndexOf('/') + 1);
+ String leaderSeqNodeName =
+ context.leaderSeqPath.substring(context.leaderSeqPath.lastIndexOf('/') + 1);
if (!seqs.contains(leaderSeqNodeName)) {
log.warn("Our node is no longer in line to be leader");
return;
@@ -137,7 +134,7 @@ public class LeaderElector {
if (zkClient.isClosed()) return; // but our zkClient is already closed
runIamLeaderProcess(context, replacement);
} catch (KeeperException.NodeExistsException e) {
- log.error("node exists",e);
+ log.error("node exists", e);
retryElection(context, false);
return;
}
@@ -152,7 +149,13 @@ public class LeaderElector {
}
try {
String watchedNode = holdElectionPath + "/" + toWatch;
- zkClient.getData(watchedNode, watcher = new ElectionWatcher(context.leaderSeqPath, watchedNode, getSeq(context.leaderSeqPath), context), null, true);
+ zkClient.getData(
+ watchedNode,
+ watcher =
+ new ElectionWatcher(
+ context.leaderSeqPath, watchedNode, getSeq(context.leaderSeqPath), context),
+ null,
+ true);
log.debug("Watching path {} to know if I could be the leader", watchedNode);
} catch (KeeperException.SessionExpiredException e) {
throw e;
@@ -167,14 +170,14 @@ public class LeaderElector {
}
}
- protected void runIamLeaderProcess(final ElectionContext context, boolean weAreReplacement) throws KeeperException,
- InterruptedException, IOException {
- context.runLeaderProcess(weAreReplacement,0);
+ protected void runIamLeaderProcess(final ElectionContext context, boolean weAreReplacement)
+ throws KeeperException, InterruptedException, IOException {
+ context.runLeaderProcess(weAreReplacement, 0);
}
-
+
/**
* Returns int given String of form n_0000000001 or n_0000000003, etc.
- *
+ *
* @return sequence number
*/
public static int getSeq(String nStringSequence) {
@@ -183,55 +186,52 @@ public class LeaderElector {
if (m.matches()) {
seq = Integer.parseInt(m.group(1));
} else {
- throw new IllegalStateException("Could not find regex match in:"
- + nStringSequence);
+ throw new IllegalStateException("Could not find regex match in:" + nStringSequence);
}
return seq;
}
-
+
private String getNodeId(String nStringSequence) {
String id;
Matcher m = SESSION_ID.matcher(nStringSequence);
if (m.matches()) {
id = m.group(1);
} else {
- throw new IllegalStateException("Could not find regex match in:"
- + nStringSequence);
+ throw new IllegalStateException("Could not find regex match in:" + nStringSequence);
}
return id;
}
- public static String getNodeName(String nStringSequence){
+ public static String getNodeName(String nStringSequence) {
String result;
Matcher m = NODE_NAME.matcher(nStringSequence);
if (m.matches()) {
result = m.group(2);
} else {
- throw new IllegalStateException("Could not find regex match in:"
- + nStringSequence);
+ throw new IllegalStateException("Could not find regex match in:" + nStringSequence);
}
return result;
-
}
-
- public int joinElection(ElectionContext context, boolean replacement) throws KeeperException, InterruptedException, IOException {
- return joinElection(context,replacement, false);
+
+ public int joinElection(ElectionContext context, boolean replacement)
+ throws KeeperException, InterruptedException, IOException {
+ return joinElection(context, replacement, false);
}
- /**
- * Begin participating in the election process. Gets a new sequential number
- * and begins watching the node with the sequence number before it, unless it
- * is the lowest number, in which case, initiates the leader process. If the
- * node that is watched goes down, check if we are the new lowest node, else
- * watch the next lowest numbered node.
- *
- * @return sequential node number
- */
- public int joinElection(ElectionContext context, boolean replacement,boolean joinAtHead) throws KeeperException, InterruptedException, IOException {
+ /**
+ * Begin participating in the election process. Gets a new sequential number and begins watching
+ * the node with the sequence number before it, unless it is the lowest number, in which case,
+ * initiates the leader process. If the node that is watched goes down, check if we are the new
+ * lowest node, else watch the next lowest numbered node.
+ *
+ * @return sequential node number
+ */
+ public int joinElection(ElectionContext context, boolean replacement, boolean joinAtHead)
+ throws KeeperException, InterruptedException, IOException {
context.joinedElectionFired();
-
+
final String shardsElectZkPath = context.electionPath + LeaderElector.ELECTION_NODE;
-
+
long sessionId = zkClient.getSolrZooKeeper().getSessionId();
String id = sessionId + "-" + context.id;
String leaderSeqPath = null;
@@ -239,26 +239,34 @@ public class LeaderElector {
int tries = 0;
while (cont) {
try {
- if(joinAtHead){
+ if (joinAtHead) {
log.debug("Node {} trying to join election at the head", id);
- List<String> nodes = OverseerTaskProcessor.getSortedElectionNodes(zkClient, shardsElectZkPath);
- if(nodes.size() <2){
- leaderSeqPath = zkClient.create(shardsElectZkPath + "/" + id + "-n_", null,
- CreateMode.EPHEMERAL_SEQUENTIAL, false);
+ List<String> nodes =
+ OverseerTaskProcessor.getSortedElectionNodes(zkClient, shardsElectZkPath);
+ if (nodes.size() < 2) {
+ leaderSeqPath =
+ zkClient.create(
+ shardsElectZkPath + "/" + id + "-n_",
+ null,
+ CreateMode.EPHEMERAL_SEQUENTIAL,
+ false);
} else {
String firstInLine = nodes.get(1);
log.debug("The current head: {}", firstInLine);
Matcher m = LEADER_SEQ.matcher(firstInLine);
if (!m.matches()) {
- throw new IllegalStateException("Could not find regex match in:"
- + firstInLine);
+ throw new IllegalStateException("Could not find regex match in:" + firstInLine);
}
- leaderSeqPath = shardsElectZkPath + "/" + id + "-n_"+ m.group(1);
+ leaderSeqPath = shardsElectZkPath + "/" + id + "-n_" + m.group(1);
zkClient.create(leaderSeqPath, null, CreateMode.EPHEMERAL, false);
}
} else {
- leaderSeqPath = zkClient.create(shardsElectZkPath + "/" + id + "-n_", null,
- CreateMode.EPHEMERAL_SEQUENTIAL, false);
+ leaderSeqPath =
+ zkClient.create(
+ shardsElectZkPath + "/" + id + "-n_",
+ null,
+ CreateMode.EPHEMERAL_SEQUENTIAL,
+ false);
}
log.debug("Joined leadership election with path: {}", leaderSeqPath);
@@ -267,21 +275,20 @@ public class LeaderElector {
} catch (ConnectionLossException e) {
// we don't know if we made our node or not...
List<String> entries = zkClient.getChildren(shardsElectZkPath, null, true);
-
+
boolean foundId = false;
for (String entry : entries) {
String nodeId = getNodeId(entry);
if (id.equals(nodeId)) {
// we did create our node...
- foundId = true;
+ foundId = true;
break;
}
}
if (!foundId) {
cont = true;
if (tries++ > 20) {
- throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
- "", e);
+ throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
try {
Thread.sleep(50);
@@ -295,8 +302,7 @@ public class LeaderElector {
// be working on it, lets try again
if (tries++ > 20) {
context = null;
- throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
- "", e);
+ throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
cont = true;
try {
@@ -312,7 +318,7 @@ public class LeaderElector {
}
private class ElectionWatcher implements Watcher {
- final String myNode,watchedNode;
+ final String myNode, watchedNode;
final ElectionContext context;
private boolean canceled = false;
@@ -325,7 +331,6 @@ public class LeaderElector {
void cancel() {
canceled = true;
-
}
@Override
@@ -358,30 +363,26 @@ public class LeaderElector {
}
}
- /**
- * Set up any ZooKeeper nodes needed for leader election.
- */
- public void setup(final ElectionContext context) throws InterruptedException,
- KeeperException {
+ /** Set up any ZooKeeper nodes needed for leader election. */
+ public void setup(final ElectionContext context) throws InterruptedException, KeeperException {
String electZKPath = context.electionPath + LeaderElector.ELECTION_NODE;
if (context instanceof OverseerElectionContext) {
zkCmdExecutor.ensureExists(electZKPath, zkClient);
} else {
// we use 2 param so that replica won't create /collection/{collection} if it doesn't exist
- zkCmdExecutor.ensureExists(electZKPath, (byte[])null, CreateMode.PERSISTENT, zkClient, 2);
+ zkCmdExecutor.ensureExists(electZKPath, (byte[]) null, CreateMode.PERSISTENT, zkClient, 2);
}
this.context = context;
}
-
- /**
- * Sort n string sequence list.
- */
+
+ /** Sort n string sequence list. */
public static void sortSeqs(List<String> seqs) {
seqs.sort(Comparator.comparingInt(LeaderElector::getSeq).thenComparing(Function.identity()));
}
- void retryElection(ElectionContext context, boolean joinAtHead) throws KeeperException, InterruptedException, IOException {
+ void retryElection(ElectionContext context, boolean joinAtHead)
+ throws KeeperException, InterruptedException, IOException {
ElectionWatcher watcher = this.watcher;
ElectionContext ctx = context.copy();
if (electionContexts != null) {
@@ -393,5 +394,4 @@ public class LeaderElector {
this.context = ctx;
joinElection(ctx, true, joinAtHead);
}
-
}
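The recipe in the LeaderElector javadoc can be sketched without any of the Solr plumbing: every candidate creates an ephemeral sequential znode, the lowest sequence number is the leader, and each non-leader watches only the node immediately ahead of it. The following is a simplified, hypothetical illustration against the plain ZooKeeper client; the election path, node-name prefix and the re-check callback are assumptions of this sketch, not the Solr implementation:

import java.util.Collections;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Bare-bones ephemeral-sequential election sketch.
class ElectionSketch {
  static void joinElection(ZooKeeper zk, String electionDir)
      throws KeeperException, InterruptedException {
    // 1. Create my candidate node; ZooKeeper appends a zero-padded sequence number.
    String myPath =
        zk.create(
            electionDir + "/n_",
            new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.EPHEMERAL_SEQUENTIAL);
    String myNode = myPath.substring(myPath.lastIndexOf('/') + 1);

    // 2. List all candidates. The sequence suffix is zero-padded, so for this sketch
    // lexical order matches numeric order.
    List<String> candidates = zk.getChildren(electionDir, false);
    Collections.sort(candidates);

    int myIndex = candidates.indexOf(myNode);
    if (myIndex == 0) {
      // Lowest sequence number: this candidate is the leader.
      System.out.println("leader: " + myNode);
    } else {
      // 3. Watch only the candidate immediately ahead; when it disappears, re-run the check.
      String toWatch = electionDir + "/" + candidates.get(myIndex - 1);
      zk.exists(toWatch, event -> System.out.println("predecessor changed, re-check leadership"));
      System.out.println(myNode + " watching " + toWatch);
    }
  }
}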
diff --git a/solr/core/src/java/org/apache/solr/cloud/LockTree.java b/solr/core/src/java/org/apache/solr/cloud/LockTree.java
index 1ada7d7..bff760b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/LockTree.java
+++ b/solr/core/src/java/org/apache/solr/cloud/LockTree.java
@@ -22,7 +22,6 @@ import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-
import org.apache.solr.cloud.OverseerMessageHandler.Lock;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CollectionParams.LockLevel;
@@ -31,9 +30,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * This is a utility class that offers fine grained locking for various Collection Operations
- * This class is designed for single threaded operation. It's safe for multiple threads to use it
- * but internally it is synchronized so that only one thread can perform any operation.
+ * This is a utility class that offers fine-grained locking for various Collection Operations. This
+ * class is designed for single-threaded operation. It's safe for multiple threads to use it, but
+ * internally it is synchronized so that only one thread can perform any operation.
*/
public class LockTree {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -42,7 +41,7 @@ public class LockTree {
private class LockImpl implements Lock {
final Node node;
- LockImpl( Node node) {
+ LockImpl(Node node) {
this.node = node;
}
@@ -59,15 +58,15 @@ public class LockTree {
}
}
-
/**
- * This class is used to mark nodes for which acquiring a lock was attempted but didn't succeed. Lock acquisition failure
- * needs to be "remembered" to trigger failures to acquire a competing lock until the Session is replaced, to prevent
- * tasks enqueued later (and dequeued later once the busy lock got released) from being executed before earlier tasks
- * that failed to execute because the lock wasn't available earlier when they attempted to acquire it.<p>
+ * This class is used to mark nodes for which acquiring a lock was attempted but didn't succeed.
+ * Lock acquisition failure needs to be "remembered" to trigger failures to acquire a competing
+ * lock until the Session is replaced, to prevent tasks enqueued later (and dequeued later once
+ * the busy lock got released) from being executed before earlier tasks that failed to execute
+ * because the lock wasn't available earlier when they attempted to acquire it.
*
- * A new Session is created each time the iteration over the queue tasks is restarted starting at the oldest non
- * running or completed tasks.
+ * <p>A new Session is created each time the iteration over the queue tasks is restarted starting
+ * at the oldest non running or completed tasks.
*/
public class Session {
private SessionNode root = new SessionNode(LockLevel.CLUSTER);
@@ -93,10 +92,12 @@ public class LockTree {
}
/**
- * Marks busy the SessionNode corresponding to <code>lockLevel</code> (node names coming from <code>path</code>).
- * @param path contains at least <code>lockLevel.getHeight()</code> strings, capturing the names of the
- * <code>SessionNode</code> being walked from the {@link Session#root} to the <code>SessionNode</code>
- * that is to be marked busy.
+ * Marks busy the SessionNode corresponding to <code>lockLevel</code> (node names coming from
+ * <code>path</code>).
+ *
+ * @param path contains at least <code>lockLevel.getHeight()</code> strings, capturing the names
+ * of the <code>SessionNode</code> being walked from the {@link Session#root} to the <code>
+ * SessionNode</code> that is to be marked busy.
* @param lockLevel the level of the node that should be marked busy.
*/
void markBusy(LockLevel lockLevel, List<String> path) {
@@ -104,7 +105,8 @@ public class LockTree {
// Lock is to be set on current node
busy = true;
} else {
- // Recursively create the required SessionNode subtree to capture lock being set on child node.
+ // Recursively create the required SessionNode subtree to capture lock being set on child
+ // node.
String s = path.get(level.getHeight());
if (kids == null) kids = new HashMap<>();
SessionNode child = kids.get(s);
@@ -142,14 +144,13 @@ public class LockTree {
this.mom = mom;
}
- //if this or any of its children are locked
+ // if this or any of its children are locked
boolean isLocked() {
if (myLock != null) return true;
for (Node node : children.values()) if (node.isLocked()) return true;
return false;
}
-
void unlock(LockImpl lockObject) {
if (myLock == lockObject) myLock = null;
else {
@@ -157,12 +158,11 @@ public class LockTree {
}
}
-
Lock lock(LockLevel lockLevel, List<String> path) {
- if (myLock != null) return null;//I'm already locked. no need to go any further
+ if (myLock != null) return null; // I'm already locked. no need to go any further
if (lockLevel == level) {
- //lock is supposed to be acquired at this level
- //If I am locked or any of my children or grandchildren are locked
+ // lock is supposed to be acquired at this level
+ // If I am locked or any of my children or grandchildren are locked
// it is not possible to acquire a lock
if (isLocked()) return null;
return myLock = new LockImpl(this);
@@ -181,6 +181,6 @@ public class LockTree {
return collect;
}
}
- static final Lock FREELOCK = () -> {};
+ static final Lock FREELOCK = () -> {};
}
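The invariant behind LockTree is compact: a lock at some node of the hierarchy can be granted only if no ancestor and no descendant of that node currently holds a lock. Leaving aside Solr's LockLevel and Session bookkeeping, a toy version of that rule might look like this (names and structure are illustrative only):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy hierarchical lock tree: lock a path only if no ancestor or descendant is locked.
class LockTreeSketch {
  private static class Node {
    final Map<String, Node> children = new HashMap<>();
    boolean locked;

    boolean subtreeLocked() {
      if (locked) return true;
      for (Node child : children.values()) {
        if (child.subtreeLocked()) return true;
      }
      return false;
    }
  }

  private final Node root = new Node();

  // Returns true if the lock at the end of `path` was acquired.
  synchronized boolean tryLock(List<String> path) {
    Node node = root;
    for (String name : path) {
      if (node.locked) return false; // an ancestor already holds a lock
      node = node.children.computeIfAbsent(name, k -> new Node());
    }
    if (node.subtreeLocked()) return false; // this node or a descendant holds a lock
    node.locked = true;
    return true;
  }

  synchronized void unlock(List<String> path) {
    Node node = root;
    for (String name : path) {
      node = node.children.get(name);
      if (node == null) return;
    }
    node.locked = false;
  }
}

With this sketch, while tryLock(List.of("collectionA")) is held, tryLock(List.of("collectionA", "shard1")) returns false and vice versa, which is the competing-operations behaviour the javadoc above describes.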
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index 0563569..9b0ae2f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -16,6 +16,9 @@
*/
package org.apache.solr.cloud;
+import static org.apache.solr.common.params.CommonParams.ID;
+
+import com.codahale.metrics.Timer;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
@@ -29,8 +32,6 @@ import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
-
-import com.codahale.metrics.Timer;
import org.apache.lucene.util.Version;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
@@ -78,61 +79,69 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.params.CommonParams.ID;
-
/**
- * <p>Cluster leader. Responsible for processing state updates, node assignments, creating/deleting
- * collections, shards, replicas and setting various properties.</p>
+ * Cluster leader. Responsible for processing state updates, node assignments, creating/deleting
+ * collections, shards, replicas and setting various properties.
+ *
+ * <p>The <b>Overseer</b> is a single elected node in the SolrCloud cluster that is in charge of
+ * interactions with ZooKeeper that require global synchronization.
*
- * <p>The <b>Overseer</b> is a single elected node in the SolrCloud cluster that is in charge of interactions with
- * ZooKeeper that require global synchronization. </p>
+ * <p>The Overseer deals with:
*
- * <p>The Overseer deals with:</p>
* <ul>
- * <li>Cluster State updates, i.e. updating Collections' <code>state.json</code> files in ZooKeeper, see {@link ClusterStateUpdater},</li>
- * <li>Collection API implementation, see
- * {@link OverseerCollectionConfigSetProcessor} and {@link OverseerCollectionMessageHandler} (and the example below),</li>
- * <li>Updating Config Sets, see {@link OverseerCollectionConfigSetProcessor} and {@link OverseerConfigSetMessageHandler},</li>
+ * <li>Cluster State updates, i.e. updating Collections' <code>state.json</code> files in
+ * ZooKeeper, see {@link ClusterStateUpdater},
+ * <li>Collection API implementation, see {@link OverseerCollectionConfigSetProcessor} and {@link
+ * OverseerCollectionMessageHandler} (and the example below),
+ * <li>Updating Config Sets, see {@link OverseerCollectionConfigSetProcessor} and {@link
+ * OverseerConfigSetMessageHandler},
* </ul>
*
- * <p>The nodes in the cluster communicate with the Overseer over queues implemented in ZooKeeper. There are essentially
- * two queues:</p>
+ * <p>The nodes in the cluster communicate with the Overseer over queues implemented in ZooKeeper.
+ * There are essentially two queues:
+ *
* <ol>
- * <li>The <b>state update queue</b>, through which nodes request the Overseer to update the <code>state.json</code> file of a
- * Collection in ZooKeeper. This queue is in Zookeeper at <code>/overseer/queue</code>,</li>
- * <li>A queue shared between <b>Collection API and Config Set API</b> requests. This queue is in Zookeeper at
- * <code>/overseer/collection-queue-work</code>.</li>
+ * <li>The <b>state update queue</b>, through which nodes request the Overseer to update the
+ * <code>state.json</code> file of a Collection in ZooKeeper. This queue is in Zookeeper at
+ * <code>/overseer/queue</code>,
+ * <li>A queue shared between <b>Collection API and Config Set API</b> requests. This queue is in
+ * Zookeeper at <code>/overseer/collection-queue-work</code>.
* </ol>
*
- * <p>An example of the steps involved in the Overseer processing a Collection creation API call:</p>
+ * <p>An example of the steps involved in the Overseer processing a Collection creation API call:
+ *
* <ol>
- * <li>Client uses the Collection API with <code>CREATE</code> action and reaches a node of the cluster,</li>
- * <li>The node (via {@link CollectionsHandler}) enqueues the request into the <code>/overseer/collection-queue-work</code>
- * queue in ZooKeepeer,</li>
- * <li>The {@link OverseerCollectionConfigSetProcessor} running on the Overseer node dequeues the message and using an
- * executor service with a maximum pool size of {@link OverseerTaskProcessor#MAX_PARALLEL_TASKS} hands it for processing
- * to {@link OverseerCollectionMessageHandler},</li>
+ * <li>Client uses the Collection API with <code>CREATE</code> action and reaches a node of the
+ * cluster,
+ * <li>The node (via {@link CollectionsHandler}) enqueues the request into the <code>
+ * /overseer/collection-queue-work</code> queue in ZooKeeper,
+ * <li>The {@link OverseerCollectionConfigSetProcessor} running on the Overseer node dequeues the
+ * message and using an executor service with a maximum pool size of {@link
+ * OverseerTaskProcessor#MAX_PARALLEL_TASKS} hands it for processing to {@link
+ * OverseerCollectionMessageHandler},
* <li>Command {@link CreateCollectionCmd} then executes and does:
- * <ol>
- * <li>Update some state directly in ZooKeeper (creating collection znode),</li>
- * <li>Compute replica placement on available nodes in the cluster,</li>
- * <li>Enqueue a state change request for creating the <code>state.json</code> file for the collection in ZooKeeper.
- * This is done by enqueuing a message in <code>/overseer/queue</code>,</li>
- * <li>The command then waits for the update to be seen in ZooKeeper...</li>
- * </ol></li>
- * <li>The {@link ClusterStateUpdater} (also running on the Overseer node) dequeues the state change message and creates the
- * <code>state.json</code> file in ZooKeeper for the Collection. All the work of the cluster state updater
- * (creations, updates, deletes) is done sequentially for the whole cluster by a single thread.</li>
- * <li>The {@link CreateCollectionCmd} sees the state change in
- * ZooKeeper and:
- * <ol start="5">
- * <li>Builds and sends requests to each node to create the appropriate cores for all the replicas of all shards
- * of the collection. Nodes create the replicas and set them to {@link org.apache.solr.common.cloud.Replica.State#ACTIVE}.</li>
- * </ol></li>
- * <li>The collection creation command has succeeded from the Overseer perspective,</li>
+ * <ol>
+ * <li>Update some state directly in ZooKeeper (creating collection znode),
+ * <li>Compute replica placement on available nodes in the cluster,
+ * <li>Enqueue a state change request for creating the <code>state.json</code> file for the
+ * collection in ZooKeeper. This is done by enqueuing a message in <code>/overseer/queue
+ * </code>,
+ * <li>The command then waits for the update to be seen in ZooKeeper...
+ * </ol>
+ * <li>The {@link ClusterStateUpdater} (also running on the Overseer node) dequeues the state
+ * change message and creates the <code>state.json</code> file in ZooKeeper for the
+ * Collection. All the work of the cluster state updater (creations, updates, deletes) is done
+ * sequentially for the whole cluster by a single thread.
+ * <li>The {@link CreateCollectionCmd} sees the state change in ZooKeeper and:
+ * <ol start="5">
+ * <li>Builds and sends requests to each node to create the appropriate cores for all the
+ * replicas of all shards of the collection. Nodes create the replicas and set them to
+ * {@link org.apache.solr.common.cloud.Replica.State#ACTIVE}.
+ * </ol>
+ * <li>The collection creation command has succeeded from the Overseer perspective,
* <li>{@link CollectionsHandler} checks the replicas in Zookeeper and verifies they are all
- * {@link org.apache.solr.common.cloud.Replica.State#ACTIVE},</li>
- * <li>The client receives a success return.</li>
+ * {@link org.apache.solr.common.cloud.Replica.State#ACTIVE},
+ * <li>The client receives a success return.
* </ol>
*/
public class Overseer implements SolrCloseable {
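Both queues described in this javadoc are, at the ZooKeeper level, sequential child znodes under well-known paths that a single Overseer thread dequeues in order. A rough, hypothetical sketch of what enqueuing a state-update message amounts to (the qn- node prefix and the raw-client call are illustrative assumptions; Solr goes through ZkDistributedQueue rather than doing this directly):

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: enqueue a JSON message as a sequential child of /overseer/queue.
class OverseerQueueSketch {
  static String enqueueStateUpdate(ZooKeeper zk, String messageJson)
      throws KeeperException, InterruptedException {
    return zk.create(
        "/overseer/queue/qn-",
        messageJson.getBytes(StandardCharsets.UTF_8),
        ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.PERSISTENT_SEQUENTIAL);
  }
}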
@@ -140,7 +149,8 @@ public class Overseer implements SolrCloseable {
// System properties are used in tests to make them run fast
public static final int STATE_UPDATE_DELAY = ZkStateReader.STATE_UPDATE_DELAY;
- public static final int STATE_UPDATE_BATCH_SIZE = Integer.getInteger("solr.OverseerStateUpdateBatchSize", 10000);
+ public static final int STATE_UPDATE_BATCH_SIZE =
+ Integer.getInteger("solr.OverseerStateUpdateBatchSize", 10000);
public static final int STATE_UPDATE_MAX_QUEUE = 20000;
public static final int NUM_RESPONSES_TO_STORE = 10000;
@@ -152,24 +162,29 @@ public class Overseer implements SolrCloseable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- enum LeaderStatus {DONT_KNOW, NO, YES}
+ enum LeaderStatus {
+ DONT_KNOW,
+ NO,
+ YES
+ }
/**
- * <p>This class is responsible for dequeueing state change requests from the ZooKeeper queue at <code>/overseer/queue</code>
- * and executing the requested cluster change (essentially writing or updating <code>state.json</code> for a collection).</p>
+ * This class is responsible for dequeueing state change requests from the ZooKeeper queue at
+ * <code>/overseer/queue</code> and executing the requested cluster change (essentially writing or
+ * updating <code>state.json</code> for a collection).
*
- * <p>The cluster state updater is a single thread dequeueing and executing requests.</p>
+ * <p>The cluster state updater is a single thread dequeueing and executing requests.
*/
private class ClusterStateUpdater implements Runnable, Closeable {
private final ZkStateReader reader;
private final SolrZkClient zkClient;
private final String myId;
- //queue where everybody can throw tasks
+ // queue where everybody can throw tasks
private final ZkDistributedQueue stateUpdateQueue;
- //TODO remove in 9.0, we do not push message into this queue anymore
- //Internal queue where overseer stores events that have not yet been published into cloudstate
- //If Overseer dies while extracting the main queue a new overseer will start from this queue
+ // TODO remove in 9.0, we do not push message into this queue anymore
+ // Internal queue where overseer stores events that have not yet been published into cloudstate
+ // If Overseer dies while extracting the main queue a new overseer will start from this queue
private final ZkDistributedQueue workQueue;
// Internal map which holds the information about running tasks.
private final DistributedMap runningMap;
@@ -196,25 +211,29 @@ public class Overseer implements SolrCloseable {
this.reader = reader;
clusterStateUpdaterMetricContext = solrMetricsContext.getChildContext(this);
- clusterStateUpdaterMetricContext.gauge(() -> stateUpdateQueue.getZkStats().getQueueLength(), true, "stateUpdateQueueSize", "queue" );
+ clusterStateUpdaterMetricContext.gauge(
+ () -> stateUpdateQueue.getZkStats().getQueueLength(),
+ true,
+ "stateUpdateQueueSize",
+ "queue");
}
public Stats getStateUpdateQueueStats() {
return stateUpdateQueue.getZkStats();
}
- public Stats getWorkQueueStats() {
+ public Stats getWorkQueueStats() {
return workQueue.getZkStats();
}
@Override
public void run() {
- MDCLoggingContext.setNode(zkController.getNodeName() );
+ MDCLoggingContext.setNode(zkController.getNodeName());
LeaderStatus isLeader = amILeader();
while (isLeader == LeaderStatus.DONT_KNOW) {
log.debug("am_i_leader unclear {}", isLeader);
- isLeader = amILeader(); // not a no, not a yes, try ask again
+ isLeader = amILeader(); // not a no, not a yes, try ask again
}
if (log.isInfoEnabled()) {
@@ -232,13 +251,12 @@ public class Overseer implements SolrCloseable {
isLeader = amILeader();
if (LeaderStatus.NO == isLeader) {
break;
- }
- else if (LeaderStatus.YES != isLeader) {
+ } else if (LeaderStatus.YES != isLeader) {
log.debug("am_i_leader unclear {}", isLeader);
continue; // not a no, not a yes, try ask again
}
- //TODO consider removing 'refreshClusterState' and simply check if clusterState is null
+ // TODO consider removing 'refreshClusterState' and simply check if clusterState is null
if (refreshClusterState) {
try {
reader.forciblyRefreshAllClusterStateSlow();
@@ -250,16 +268,22 @@ public class Overseer implements SolrCloseable {
// the state queue, items would have been left in the
// work queue so let's process those first
byte[] data = fallbackQueue.peek();
- while (fallbackQueueSize > 0 && data != null) {
+ while (fallbackQueueSize > 0 && data != null) {
final ZkNodeProps message = ZkNodeProps.load(data);
if (log.isDebugEnabled()) {
- log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
+ log.debug(
+ "processMessage: fallbackQueueSize: {}, message = {}",
+ fallbackQueue.getZkStats().getQueueLength(),
+ message);
}
try {
- clusterState = processQueueItem(message, clusterState, zkStateWriter, false, null);
+ clusterState =
+ processQueueItem(message, clusterState, zkStateWriter, false, null);
} catch (Exception e) {
if (isBadMessage(e)) {
- log.warn("Exception when process message = {}, consider as bad message and poll out from the queue", message);
+ log.warn(
+ "Exception when process message = {}, consider as bad message and poll out from the queue",
+ message);
fallbackQueue.poll();
}
throw e;
@@ -268,7 +292,8 @@ public class Overseer implements SolrCloseable {
data = fallbackQueue.peek();
fallbackQueueSize--;
}
- // force flush at the end of the loop, if there are no pending updates, this is a no op call
+ // force flush at the end of the loop, if there are no pending updates, this is a no
+ // op call
clusterState = zkStateWriter.writePendingUpdates();
// the workQueue is empty now, use stateUpdateQueue as fallback queue
fallbackQueue = stateUpdateQueue;
@@ -290,7 +315,8 @@ public class Overseer implements SolrCloseable {
LinkedList<Pair<String, byte[]>> queue = null;
try {
- // We do not need to filter any nodes here cause all processed nodes are removed once we flush clusterstate
+ // We do not need to filter any nodes here cause all processed nodes are removed once we
+ // flush clusterstate
queue = new LinkedList<>(stateUpdateQueue.peekElements(1000, 3000L, (x) -> true));
} catch (KeeperException.SessionExpiredException e) {
log.warn("Solr cannot talk to ZK, exiting Overseer main queue loop", e);
@@ -310,12 +336,16 @@ public class Overseer implements SolrCloseable {
byte[] data = head.second();
final ZkNodeProps message = ZkNodeProps.load(data);
if (log.isDebugEnabled()) {
- log.debug("processMessage: queueSize: {}, message = {}", stateUpdateQueue.getZkStats().getQueueLength(), message);
+ log.debug(
+ "processMessage: queueSize: {}, message = {}",
+ stateUpdateQueue.getZkStats().getQueueLength(),
+ message);
}
processedNodes.add(head.first());
fallbackQueueSize = processedNodes.size();
- // force flush to ZK after each message because there is no fallback if workQueue items
+ // force flush to ZK after each message because there is no fallback if workQueue
+ // items
// are removed from workQueue but fail to be written to ZK
while (unprocessedMessages.size() > 0) {
clusterState = zkStateWriter.writePendingUpdates();
@@ -323,18 +353,27 @@ public class Overseer implements SolrCloseable {
clusterState = m.run(clusterState, Overseer.this);
}
// The callback always be called on this thread
- clusterState = processQueueItem(message, clusterState, zkStateWriter, true, () -> {
- stateUpdateQueue.remove(processedNodes);
- processedNodes.clear();
- });
+ clusterState =
+ processQueueItem(
+ message,
+ clusterState,
+ zkStateWriter,
+ true,
+ () -> {
+ stateUpdateQueue.remove(processedNodes);
+ processedNodes.clear();
+ });
}
if (isClosed) break;
// if an event comes in the next 100ms batch it together
- queue = new LinkedList<>(stateUpdateQueue.peekElements(1000, 100, node -> !processedNodes.contains(node)));
+ queue =
+ new LinkedList<>(
+ stateUpdateQueue.peekElements(
+ 1000, 100, node -> !processedNodes.contains(node)));
}
fallbackQueueSize = processedNodes.size();
- // we should force write all pending updates because the next iteration might sleep until there
- // are more items in the main queue
+ // we should force write all pending updates because the next iteration might sleep
+ // until there are more items in the main queue
clusterState = zkStateWriter.writePendingUpdates();
// clean work queue
stateUpdateQueue.remove(processedNodes);
@@ -346,7 +385,7 @@ public class Overseer implements SolrCloseable {
Thread.currentThread().interrupt();
return;
} catch (AlreadyClosedException e) {
-
+
} catch (Exception e) {
log.error("Exception in Overseer main queue loop", e);
refreshClusterState = true; // it might have been a bad version error
@@ -356,7 +395,7 @@ public class Overseer implements SolrCloseable {
if (log.isInfoEnabled()) {
log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
}
- //do this in a separate thread because any wait is interrupted in this main thread
+ // do this in a separate thread because any wait is interrupted in this main thread
new Thread(this::checkIfIamStillLeader, "OverseerExitThread").start();
}
}
@@ -366,15 +405,24 @@ public class Overseer implements SolrCloseable {
private boolean isBadMessage(Exception e) {
if (e instanceof KeeperException) {
KeeperException ke = (KeeperException) e;
- return ke.code() == KeeperException.Code.NONODE || ke.code() == KeeperException.Code.NODEEXISTS;
+ return ke.code() == KeeperException.Code.NONODE
+ || ke.code() == KeeperException.Code.NODEEXISTS;
}
return !(e instanceof InterruptedException);
}
- private ClusterState processQueueItem(ZkNodeProps message, ClusterState clusterState, ZkStateWriter zkStateWriter, boolean enableBatching, ZkStateWriter.ZkWriteCallback callback) throws Exception {
+ private ClusterState processQueueItem(
+ ZkNodeProps message,
+ ClusterState clusterState,
+ ZkStateWriter zkStateWriter,
+ boolean enableBatching,
+ ZkStateWriter.ZkWriteCallback callback)
+ throws Exception {
final String operation = message.getStr(QUEUE_OPERATION);
if (operation == null) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Message missing " + QUEUE_OPERATION + ":" + message);
+ throw new SolrException(
+ SolrException.ErrorCode.SERVER_ERROR,
+ "Message missing " + QUEUE_OPERATION + ":" + message);
}
List<ZkWriteCommand> zkWriteCommands = null;
final Timer.Context timerContext = stats.time(operation);
@@ -387,14 +435,17 @@ public class Overseer implements SolrCloseable {
// ZooKeeper in which case another Overseer should take over
// TODO: if ordering for the message is not important, we could
// track retries and put it back on the end of the queue
- log.error("Overseer could not process the current clusterstate state update message, skipping the message: {}", message, e);
+ log.error(
+ "Overseer could not process the current clusterstate state update message, skipping the message: {}",
+ message,
+ e);
stats.error(operation);
} finally {
timerContext.stop();
}
if (zkWriteCommands != null) {
clusterState = zkStateWriter.enqueueUpdate(clusterState, zkWriteCommands, callback);
- if (!enableBatching) {
+ if (!enableBatching) {
clusterState = zkStateWriter.writePendingUpdates();
}
}
@@ -402,8 +453,9 @@ public class Overseer implements SolrCloseable {
}
private void checkIfIamStillLeader() {
- if (zkController != null && (zkController.getCoreContainer().isShutDown() || zkController.isClosed())) {
- return;//shutting down no need to go further
+ if (zkController != null
+ && (zkController.getCoreContainer().isShutDown() || zkController.isClosed())) {
+ return; // shutting down, no need to go further
}
org.apache.zookeeper.data.Stat stat = new org.apache.zookeeper.data.Stat();
final String path = OVERSEER_ELECT + "/leader";
@@ -417,84 +469,107 @@ public class Overseer implements SolrCloseable {
return;
}
try {
- Map<?,?> m = (Map<?,?>) Utils.fromJSON(data);
+ Map<?, ?> m = (Map<?, ?>) Utils.fromJSON(data);
String id = (String) m.get(ID);
- if(overseerCollectionConfigSetProcessor.getId().equals(id)){
+ if (overseerCollectionConfigSetProcessor.getId().equals(id)) {
try {
- log.warn("I (id={}) am exiting, but I'm still the leader",
+ log.warn(
+ "I (id={}) am exiting, but I'm still the leader",
overseerCollectionConfigSetProcessor.getId());
- zkClient.delete(path,stat.getVersion(),true);
+ zkClient.delete(path, stat.getVersion(), true);
} catch (KeeperException.BadVersionException e) {
- //no problem ignore it some other Overseer has already taken over
+ // no problem, ignore it; some other Overseer has already taken over
} catch (Exception e) {
log.error("Could not delete my leader node {}", path, e);
}
- } else{
+ } else {
log.info("somebody else (id={}) has already taken up the overseer position", id);
}
} finally {
- //if I am not shutting down, Then I need to rejoin election
+ // if I am not shutting down, then I need to rejoin election
try {
if (zkController != null && !zkController.getCoreContainer().isShutDown()) {
zkController.rejoinOverseerElection(null, false);
}
} catch (Exception e) {
- log.warn("Unable to rejoinElection ",e);
+ log.warn("Unable to rejoinElection ", e);
}
}
}
- private List<ZkWriteCommand> processMessage(ClusterState clusterState,
- final ZkNodeProps message, final String operation) {
- CollectionParams.CollectionAction collectionAction = CollectionParams.CollectionAction.get(operation);
+ private List<ZkWriteCommand> processMessage(
+ ClusterState clusterState, final ZkNodeProps message, final String operation) {
+ CollectionParams.CollectionAction collectionAction =
+ CollectionParams.CollectionAction.get(operation);
if (collectionAction != null) {
switch (collectionAction) {
case CREATE:
- return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).createCollection(clusterState, message));
+ return Collections.singletonList(
+ new ClusterStateMutator(getSolrCloudManager())
+ .createCollection(clusterState, message));
case DELETE:
- return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).deleteCollection(clusterState, message));
+ return Collections.singletonList(
+ new ClusterStateMutator(getSolrCloudManager())
+ .deleteCollection(clusterState, message));
case CREATESHARD:
- return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).createShard(clusterState, message));
+ return Collections.singletonList(
+ new CollectionMutator(getSolrCloudManager()).createShard(clusterState, message));
case DELETESHARD:
- return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).deleteShard(clusterState, message));
+ return Collections.singletonList(
+ new CollectionMutator(getSolrCloudManager()).deleteShard(clusterState, message));
case ADDREPLICA:
- return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addReplica(clusterState, message));
+ return Collections.singletonList(
+ new SliceMutator(getSolrCloudManager()).addReplica(clusterState, message));
case ADDREPLICAPROP:
- return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).addReplicaProperty(clusterState, message));
+ return Collections.singletonList(
+ new ReplicaMutator(getSolrCloudManager())
+ .addReplicaProperty(clusterState, message));
case DELETEREPLICAPROP:
- return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).deleteReplicaProperty(clusterState, message));
+ return Collections.singletonList(
+ new ReplicaMutator(getSolrCloudManager())
+ .deleteReplicaProperty(clusterState, message));
case BALANCESHARDUNIQUE:
ExclusiveSliceProperty dProp = new ExclusiveSliceProperty(clusterState, message);
if (dProp.balanceProperty()) {
String collName = message.getStr(ZkStateReader.COLLECTION_PROP);
- return Collections.singletonList(new ZkWriteCommand(collName, dProp.getDocCollection()));
+ return Collections.singletonList(
+ new ZkWriteCommand(collName, dProp.getDocCollection()));
}
break;
case MODIFYCOLLECTION:
- return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).modifyCollection(clusterState,message));
+ return Collections.singletonList(
+ new CollectionMutator(getSolrCloudManager())
+ .modifyCollection(clusterState, message));
default:
- throw new RuntimeException("unknown operation:" + operation
- + " contents:" + message.getProperties());
+ throw new RuntimeException(
+ "unknown operation:" + operation + " contents:" + message.getProperties());
}
} else {
OverseerAction overseerAction = OverseerAction.get(operation);
if (overseerAction == null) {
- throw new RuntimeException("unknown operation:" + operation + " contents:" + message.getProperties());
+ throw new RuntimeException(
+ "unknown operation:" + operation + " contents:" + message.getProperties());
}
switch (overseerAction) {
case STATE:
- return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).setState(clusterState, message));
+ return Collections.singletonList(
+ new ReplicaMutator(getSolrCloudManager()).setState(clusterState, message));
case LEADER:
- return Collections.singletonList(new SliceMutator(getSolrCloudManager()).setShardLeader(clusterState, message));
+ return Collections.singletonList(
+ new SliceMutator(getSolrCloudManager()).setShardLeader(clusterState, message));
case DELETECORE:
- return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeReplica(clusterState, message));
+ return Collections.singletonList(
+ new SliceMutator(getSolrCloudManager()).removeReplica(clusterState, message));
case ADDROUTINGRULE:
- return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addRoutingRule(clusterState, message));
+ return Collections.singletonList(
+ new SliceMutator(getSolrCloudManager()).addRoutingRule(clusterState, message));
case REMOVEROUTINGRULE:
- return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeRoutingRule(clusterState, message));
+ return Collections.singletonList(
+ new SliceMutator(getSolrCloudManager()).removeRoutingRule(clusterState, message));
case UPDATESHARDSTATE:
- return Collections.singletonList(new SliceMutator(getSolrCloudManager()).updateShardState(clusterState, message));
+ return Collections.singletonList(
+ new SliceMutator(getSolrCloudManager()).updateShardState(clusterState, message));
case QUIT:
if (myId.equals(message.get(ID))) {
if (log.isInfoEnabled()) {
@@ -509,7 +584,8 @@ public class Overseer implements SolrCloseable {
case DOWNNODE:
return new NodeMutator(getSolrCloudManager()).downNode(clusterState, message);
default:
- throw new RuntimeException("unknown operation:" + operation + " contents:" + message.getProperties());
+ throw new RuntimeException(
+ "unknown operation:" + operation + " contents:" + message.getProperties());
}
}
@@ -521,8 +597,8 @@ public class Overseer implements SolrCloseable {
boolean success = true;
String propsId = null;
try {
- ZkNodeProps props = ZkNodeProps.load(zkClient.getData(
- OVERSEER_ELECT + "/leader", null, null, true));
+ ZkNodeProps props =
+ ZkNodeProps.load(zkClient.getData(OVERSEER_ELECT + "/leader", null, null, true));
propsId = props.getStr(ID);
if (myId.equals(propsId)) {
return LeaderStatus.YES;
@@ -547,9 +623,9 @@ public class Overseer implements SolrCloseable {
log.warn("Unexpected exception", e);
} finally {
timerContext.stop();
- if (success) {
+ if (success) {
stats.success("am_i_leader");
- } else {
+ } else {
stats.error("am_i_leader");
}
}
@@ -558,11 +634,10 @@ public class Overseer implements SolrCloseable {
}
@Override
- public void close() {
- this.isClosed = true;
- clusterStateUpdaterMetricContext.unregister();
- }
-
+ public void close() {
+ this.isClosed = true;
+ clusterStateUpdaterMetricContext.unregister();
+ }
}
public static class OverseerThread extends Thread implements Closeable {
@@ -570,7 +645,8 @@ public class Overseer implements SolrCloseable {
protected volatile boolean isClosed;
private final Closeable thread;
- public <T extends Runnable & Closeable> OverseerThread(ThreadGroup ccTg, T thread, String name) {
+ public <T extends Runnable & Closeable> OverseerThread(
+ ThreadGroup ccTg, T thread, String name) {
super(ccTg, thread, name);
this.thread = thread;
}
@@ -588,7 +664,6 @@ public class Overseer implements SolrCloseable {
public boolean isClosed() {
return this.isClosed;
}
-
}
private OverseerThread ccThread;
@@ -616,9 +691,13 @@ public class Overseer implements SolrCloseable {
private final DistributedClusterStateUpdater distributedClusterStateUpdater;
// overseer not responsible for closing reader
- public Overseer(HttpShardHandler shardHandler,
- UpdateShardHandler updateShardHandler, String adminPath,
- final ZkStateReader reader, ZkController zkController, CloudConfig config)
+ public Overseer(
+ HttpShardHandler shardHandler,
+ UpdateShardHandler updateShardHandler,
+ String adminPath,
+ final ZkStateReader reader,
+ ZkController zkController,
+ CloudConfig config)
throws KeeperException, InterruptedException {
this.reader = reader;
this.shardHandler = shardHandler;
@@ -627,49 +706,72 @@ public class Overseer implements SolrCloseable {
this.zkController = zkController;
this.stats = new Stats();
this.config = config;
- this.distributedClusterStateUpdater = new DistributedClusterStateUpdater(config.getDistributedClusterStateUpdates());
-
- this.solrMetricsContext = new SolrMetricsContext(zkController.getCoreContainer().getMetricManager(), SolrInfoBean.Group.overseer.toString(), metricTag);
+ this.distributedClusterStateUpdater =
+ new DistributedClusterStateUpdater(config.getDistributedClusterStateUpdates());
+
+ this.solrMetricsContext =
+ new SolrMetricsContext(
+ zkController.getCoreContainer().getMetricManager(),
+ SolrInfoBean.Group.overseer.toString(),
+ metricTag);
}
public synchronized void start(String id) {
- MDCLoggingContext.setNode(zkController == null ?
- null :
- zkController.getNodeName());
+ MDCLoggingContext.setNode(zkController == null ? null : zkController.getNodeName());
this.id = id;
closed = false;
doClose();
stats = new Stats();
log.info("Overseer (id={}) starting", id);
createOverseerNode(reader.getZkClient());
- //launch cluster state updater thread
+ // launch cluster state updater thread
ThreadGroup tg = new ThreadGroup("Overseer state updater.");
- updaterThread = new OverseerThread(tg, new ClusterStateUpdater(reader, id, stats), "OverseerStateUpdate-" + id);
+ updaterThread =
+ new OverseerThread(
+ tg, new ClusterStateUpdater(reader, id, stats), "OverseerStateUpdate-" + id);
updaterThread.setDaemon(true);
ThreadGroup ccTg = new ThreadGroup("Overseer collection creation process.");
- // Below is the only non test usage of the "cluster state update" queue even when distributed cluster state updates are enabled.
- // That queue is used to tell the Overseer to quit. As long as we have an Overseer, we need to support this.
- OverseerNodePrioritizer overseerPrioritizer = new OverseerNodePrioritizer(reader, this, adminPath, shardHandler.getShardHandlerFactory());
- overseerCollectionConfigSetProcessor = new OverseerCollectionConfigSetProcessor(reader, id, shardHandler, adminPath, stats, Overseer.this, overseerPrioritizer, solrMetricsContext);
- ccThread = new OverseerThread(ccTg, overseerCollectionConfigSetProcessor, "OverseerCollectionConfigSetProcessor-" + id);
+ // Below is the only non-test usage of the "cluster state update" queue even when distributed
+ // cluster state updates are enabled. That queue is used to tell the Overseer to quit. As long
+ // as we have an Overseer, we need to support this.
+ OverseerNodePrioritizer overseerPrioritizer =
+ new OverseerNodePrioritizer(reader, this, adminPath, shardHandler.getShardHandlerFactory());
+ overseerCollectionConfigSetProcessor =
+ new OverseerCollectionConfigSetProcessor(
+ reader,
+ id,
+ shardHandler,
+ adminPath,
+ stats,
+ Overseer.this,
+ overseerPrioritizer,
+ solrMetricsContext);
+ ccThread =
+ new OverseerThread(
+ ccTg,
+ overseerCollectionConfigSetProcessor,
+ "OverseerCollectionConfigSetProcessor-" + id);
ccThread.setDaemon(true);
updaterThread.start();
ccThread.start();
- systemCollectionCompatCheck(new BiConsumer<String, Object>() {
- boolean firstPair = true;
- @Override
- public void accept(String s, Object o) {
- if (firstPair) {
- log.warn("WARNING: Collection '.system' may need re-indexing due to compatibility issues listed below. See REINDEXCOLLECTION documentation for more details.");
- firstPair = false;
- }
- log.warn("WARNING: *\t{}:\t{}", s, o);
- }
- });
+ systemCollectionCompatCheck(
+ new BiConsumer<String, Object>() {
+ boolean firstPair = true;
+
+ @Override
+ public void accept(String s, Object o) {
+ if (firstPair) {
+ log.warn(
+ "WARNING: Collection '.system' may need re-indexing due to compatibility issues listed below. See REINDEXCOLLECTION documentation for more details.");
+ firstPair = false;
+ }
+ log.warn("WARNING: *\t{}:\t{}", s, o);
+ }
+ });
getCoreContainer().getClusterSingletons().startClusterSingletons();
@@ -698,22 +800,24 @@ public class Overseer implements SolrCloseable {
doCompatCheck(consumer);
} else {
// wait for all leaders to become active and then check
- zkController.zkStateReader.registerCollectionStateWatcher(CollectionAdminParams.SYSTEM_COLL, (liveNodes, state) -> {
- boolean active = true;
- if (state == null || liveNodes.isEmpty()) {
- return true;
- }
- for (Slice s : state.getActiveSlices()) {
- if (s.getLeader() == null || !s.getLeader().isActive(liveNodes)) {
- active = false;
- break;
- }
- }
- if (active) {
- doCompatCheck(consumer);
- }
- return active;
- });
+ zkController.zkStateReader.registerCollectionStateWatcher(
+ CollectionAdminParams.SYSTEM_COLL,
+ (liveNodes, state) -> {
+ boolean active = true;
+ if (state == null || liveNodes.isEmpty()) {
+ return true;
+ }
+ for (Slice s : state.getActiveSlices()) {
+ if (s.getLeader() == null || !s.getLeader().isActive(liveNodes)) {
+ active = false;
+ break;
+ }
+ }
+ if (active) {
+ doCompatCheck(consumer);
+ }
+ return active;
+ });
}
}
@@ -723,15 +827,20 @@ public class Overseer implements SolrCloseable {
} else {
return;
}
- try (CloudSolrClient client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
- .withSocketTimeout(30000).withConnectionTimeout(15000)
- .withHttpClient(updateShardHandler.getDefaultHttpClient()).build()) {
- CollectionAdminRequest.ColStatus req = CollectionAdminRequest.collectionStatus(CollectionAdminParams.SYSTEM_COLL)
- .setWithSegments(true)
- .setWithFieldInfo(true);
+ try (CloudSolrClient client =
+ new CloudSolrClient.Builder(
+ Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
+ .withSocketTimeout(30000)
+ .withConnectionTimeout(15000)
+ .withHttpClient(updateShardHandler.getDefaultHttpClient())
+ .build()) {
+ CollectionAdminRequest.ColStatus req =
+ CollectionAdminRequest.collectionStatus(CollectionAdminParams.SYSTEM_COLL)
+ .setWithSegments(true)
+ .setWithFieldInfo(true);
CollectionAdminResponse rsp = req.process(client);
- NamedList<?> status = (NamedList<?>)rsp.getResponse().get(CollectionAdminParams.SYSTEM_COLL);
- Collection<?> nonCompliant = (Collection<?>)status.get("schemaNonCompliant");
+ NamedList<?> status = (NamedList<?>) rsp.getResponse().get(CollectionAdminParams.SYSTEM_COLL);
+ Collection<?> nonCompliant = (Collection<?>) status.get("schemaNonCompliant");
if (!nonCompliant.contains("(NONE)")) {
consumer.accept("indexFieldsNotMatchingSchema", nonCompliant);
}
@@ -741,34 +850,36 @@ public class Overseer implements SolrCloseable {
String currentVersion = Version.LATEST.toString();
segmentVersions.add(currentVersion);
segmentCreatedMajorVersions.add(currentMajorVersion);
- NamedList<?> shards = (NamedList<?>)status.get("shards");
+ NamedList<?> shards = (NamedList<?>) status.get("shards");
for (Map.Entry<String, ?> entry : shards) {
- NamedList<?> leader = (NamedList<?>)((NamedList<?>)entry.getValue()).get("leader");
+ NamedList<?> leader = (NamedList<?>) ((NamedList<?>) entry.getValue()).get("leader");
if (leader == null) {
continue;
}
- NamedList<?> segInfos = (NamedList<?>)leader.get("segInfos");
+ NamedList<?> segInfos = (NamedList<?>) leader.get("segInfos");
if (segInfos == null) {
continue;
}
- NamedList<?> infos = (NamedList<?>)segInfos.get("info");
- if (((Number)infos.get("numSegments")).intValue() > 0) {
+ NamedList<?> infos = (NamedList<?>) segInfos.get("info");
+ if (((Number) infos.get("numSegments")).intValue() > 0) {
segmentVersions.add(infos.get("minSegmentLuceneVersion").toString());
}
if (infos.get("commitLuceneVersion") != null) {
segmentVersions.add(infos.get("commitLuceneVersion").toString());
}
- NamedList<?> segmentInfos = (NamedList<?>)segInfos.get("segments");
- segmentInfos.forEach((k, v) -> {
- NamedList<?> segment = (NamedList<?>)v;
- segmentVersions.add(segment.get("version").toString());
- if (segment.get("minVersion") != null) {
- segmentVersions.add(segment.get("version").toString());
- }
- if (segment.get("createdVersionMajor") != null) {
- segmentCreatedMajorVersions.add(((Number)segment.get("createdVersionMajor")).intValue());
- }
- });
+ NamedList<?> segmentInfos = (NamedList<?>) segInfos.get("segments");
+ segmentInfos.forEach(
+ (k, v) -> {
+ NamedList<?> segment = (NamedList<?>) v;
+ segmentVersions.add(segment.get("version").toString());
+ if (segment.get("minVersion") != null) {
+ segmentVersions.add(segment.get("version").toString());
+ }
+ if (segment.get("createdVersionMajor") != null) {
+ segmentCreatedMajorVersions.add(
+ ((Number) segment.get("createdVersionMajor")).intValue());
+ }
+ });
}
if (segmentVersions.size() > 1) {
consumer.accept("differentSegmentVersions", segmentVersions);
@@ -784,18 +895,14 @@ public class Overseer implements SolrCloseable {
}
}
- /**
- * Start {@link ClusterSingleton} plugins when we become the leader.
- */
+ /** Start {@link ClusterSingleton} plugins when we become the leader. */
- /**
- * Stop {@link ClusterSingleton} plugins when we lose leadership.
- */
+ /** Stop {@link ClusterSingleton} plugins when we lose leadership. */
public Stats getStats() {
return stats;
}
- ZkController getZkController(){
+ ZkController getZkController() {
return zkController;
}
@@ -813,7 +920,7 @@ public class Overseer implements SolrCloseable {
/**
* For tests.
- *
+ *
* @lucene.internal
* @return state updater thread
*/
@@ -832,7 +939,6 @@ public class Overseer implements SolrCloseable {
this.closed = true;
doClose();
-
assert ObjectReleaseTracker.release(this);
}
@@ -842,7 +948,7 @@ public class Overseer implements SolrCloseable {
}
private void doClose() {
-
+
if (updaterThread != null) {
IOUtils.closeQuietly(updaterThread);
updaterThread.interrupt();
@@ -854,12 +960,14 @@ public class Overseer implements SolrCloseable {
if (updaterThread != null) {
try {
updaterThread.join();
- } catch (InterruptedException e) {}
+ } catch (InterruptedException e) {
+ }
}
if (ccThread != null) {
try {
ccThread.join();
- } catch (InterruptedException e) {}
+ } catch (InterruptedException e) {
+ }
}
updaterThread = null;
ccThread = null;
@@ -867,64 +975,72 @@ public class Overseer implements SolrCloseable {
/**
* Get queue that can be used to send messages to Overseer.
- * <p>
- * Any and all modifications to the cluster state must be sent to
- * the overseer via this queue. The complete list of overseer actions
- * supported by this queue are documented inside the {@link OverseerAction} enum.
- * <p>
- * Performance statistics on the returned queue
- * are <em>not</em> tracked by the Overseer Stats API,
- * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
- * Therefore, this method should be used only by clients for writing to the overseer queue.
- * <p>
- * This method will create the /overseer znode in ZooKeeper if it does not exist already.
+ *
+ * <p>Any and all modifications to the cluster state must be sent to the overseer via this queue.
+ * The complete list of overseer actions supported by this queue are documented inside the {@link
+ * OverseerAction} enum.
+ *
+ * <p>Performance statistics on the returned queue are <em>not</em> tracked by the Overseer Stats
+ * API, see {@link
+ * org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}. Therefore,
+ * this method should be used only by clients for writing to the overseer queue.
+ *
+ * <p>This method will create the /overseer znode in ZooKeeper if it does not exist already.
*
* @return a {@link ZkDistributedQueue} object
*/
ZkDistributedQueue getStateUpdateQueue() {
if (distributedClusterStateUpdater.isDistributedStateUpdate()) {
- throw new IllegalStateException("Cluster state is done in a distributed way, should not try to access ZK queue");
+ throw new IllegalStateException(
+ "Cluster state is done in a distributed way, should not try to access ZK queue");
}
return getStateUpdateQueue(new Stats());
}
/**
- * Separated into its own method from {@link #getStateUpdateQueue()} that does the same thing because this one is legit
- * to call even when cluster state updates are distributed whereas the other one is not.
+ * Separated into its own method from {@link #getStateUpdateQueue()} that does the same thing
+ * because this one is legit to call even when cluster state updates are distributed whereas the
+ * other one is not.
*/
ZkDistributedQueue getOverseerQuitNotificationQueue() {
return getStateUpdateQueue(new Stats());
}
/**
- * The overseer uses the returned queue to read any operations submitted by clients.
- * This method should not be used directly by anyone other than the Overseer itself.
- * This method will create the /overseer znode in ZooKeeper if it does not exist already.
+ * The overseer uses the returned queue to read any operations submitted by clients. This method
+ * should not be used directly by anyone other than the Overseer itself. This method will create
+ * the /overseer znode in ZooKeeper if it does not exist already.
*
- * @param zkStats a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
+ * @param zkStats a {@link Stats} object which tracks statistics for all zookeeper operations
+ * performed by this queue
* @return a {@link ZkDistributedQueue} object
*/
ZkDistributedQueue getStateUpdateQueue(Stats zkStats) {
- return new ZkDistributedQueue(reader.getZkClient(), "/overseer/queue", zkStats, STATE_UPDATE_MAX_QUEUE, new ConnectionManager.IsClosed(){
- public boolean isClosed() {
- return Overseer.this.isClosed() || zkController.getCoreContainer().isShutDown();
- }
- });
+ return new ZkDistributedQueue(
+ reader.getZkClient(),
+ "/overseer/queue",
+ zkStats,
+ STATE_UPDATE_MAX_QUEUE,
+ new ConnectionManager.IsClosed() {
+ public boolean isClosed() {
+ return Overseer.this.isClosed() || zkController.getCoreContainer().isShutDown();
+ }
+ });
}
/**
* Internal overseer work queue. This should not be used outside of Overseer.
- * <p>
- * This queue is used to store overseer operations that have been removed from the
- * state update queue but are being executed as part of a batch. Once
- * the result of the batch is persisted to zookeeper, these items are removed from the
- * work queue. If the overseer dies while processing a batch then a new overseer always
- * operates from the work queue first and only then starts processing operations from the
- * state update queue.
- * This method will create the /overseer znode in ZooKeeper if it does not exist already.
+ *
+ * <p>This queue is used to store overseer operations that have been removed from the state update
+ * queue but are being executed as part of a batch. Once the result of the batch is persisted to
+ * zookeeper, these items are removed from the work queue. If the overseer dies while processing a
+ * batch then a new overseer always operates from the work queue first and only then starts
+ * processing operations from the state update queue. This method will create the /overseer znode
+ * in ZooKeeper if it does not exist already.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
- * @param zkStats a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
+ * @param zkStats a {@link Stats} object which tracks statistics for all zookeeper operations
+ * performed by this queue
* @return a {@link ZkDistributedQueue} object
*/
static ZkDistributedQueue getInternalWorkQueue(final SolrZkClient zkClient, Stats zkStats) {
@@ -938,14 +1054,22 @@ public class Overseer implements SolrCloseable {
/* Size-limited map for successfully completed tasks*/
static DistributedMap getCompletedMap(final SolrZkClient zkClient) {
- return new SizeLimitedDistributedMap(zkClient, "/overseer/collection-map-completed", NUM_RESPONSES_TO_STORE, (child) -> getAsyncIdsMap(zkClient).remove(child));
+ return new SizeLimitedDistributedMap(
+ zkClient,
+ "/overseer/collection-map-completed",
+ NUM_RESPONSES_TO_STORE,
+ (child) -> getAsyncIdsMap(zkClient).remove(child));
}
/* Map for failed tasks, not to be used outside of the Overseer */
static DistributedMap getFailureMap(final SolrZkClient zkClient) {
- return new SizeLimitedDistributedMap(zkClient, "/overseer/collection-map-failure", NUM_RESPONSES_TO_STORE, (child) -> getAsyncIdsMap(zkClient).remove(child));
+ return new SizeLimitedDistributedMap(
+ zkClient,
+ "/overseer/collection-map-failure",
+ NUM_RESPONSES_TO_STORE,
+ (child) -> getAsyncIdsMap(zkClient).remove(child));
}
-
+
/* Map of async IDs currently in use*/
static DistributedMap getAsyncIdsMap(final SolrZkClient zkClient) {
return new DistributedMap(zkClient, "/overseer/async_ids");
@@ -953,15 +1077,15 @@ public class Overseer implements SolrCloseable {
/**
* Get queue that can be used to submit collection API tasks to the Overseer.
- * <p>
- * This queue is used internally by the {@link CollectionsHandler} to submit collection API
+ *
+ * <p>This queue is used internally by the {@link CollectionsHandler} to submit collection API
* tasks which are executed by the {@link OverseerCollectionMessageHandler}. The actions supported
- * by this queue are listed in the {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
- * enum.
- * <p>
- * Performance statistics on the returned queue
- * are <em>not</em> tracked by the Overseer Stats API,
- * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
+ * by this queue are listed in the {@link
+ * org.apache.solr.common.params.CollectionParams.CollectionAction} enum.
+ *
+ * <p>Performance statistics on the returned queue are <em>not</em> tracked by the Overseer Stats
+ * API, see {@link
+ * org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
@@ -972,14 +1096,14 @@ public class Overseer implements SolrCloseable {
/**
* Get queue that can be used to read collection API tasks to the Overseer.
- * <p>
- * This queue is used internally by the {@link OverseerCollectionMessageHandler} to read collection API
- * tasks submitted by the {@link CollectionsHandler}. The actions supported
- * by this queue are listed in the {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
+ *
+ * <p>This queue is used internally by the {@link OverseerCollectionMessageHandler} to read
+ * collection API tasks submitted by the {@link CollectionsHandler}. The actions supported by this
+ * queue are listed in the {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
* enum.
- * <p>
- * Performance statistics on the returned queue are tracked by the Overseer Stats API,
- * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
+ *
+ * <p>Performance statistics on the returned queue are tracked by the Overseer Stats API, see
+ * {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
@@ -990,38 +1114,38 @@ public class Overseer implements SolrCloseable {
/**
* Get queue that can be used to submit configset API tasks to the Overseer.
- * <p>
- * This queue is used internally by the {@link org.apache.solr.handler.admin.ConfigSetsHandler} to submit
- * tasks which are executed by the {@link OverseerConfigSetMessageHandler}. The actions supported
- * by this queue are listed in the {@link org.apache.solr.common.params.ConfigSetParams.ConfigSetAction}
- * enum.
- * <p>
- * Performance statistics on the returned queue
- * are <em>not</em> tracked by the Overseer Stats API,
- * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
+ *
+ * <p>This queue is used internally by the {@link org.apache.solr.handler.admin.ConfigSetsHandler}
+ * to submit tasks which are executed by the {@link OverseerConfigSetMessageHandler}. The actions
+ * supported by this queue are listed in the {@link
+ * org.apache.solr.common.params.ConfigSetParams.ConfigSetAction} enum.
+ *
+ * <p>Performance statistics on the returned queue are <em>not</em> tracked by the Overseer Stats
+ * API, see {@link
+ * org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
*/
- OverseerTaskQueue getConfigSetQueue(final SolrZkClient zkClient) {
+ OverseerTaskQueue getConfigSetQueue(final SolrZkClient zkClient) {
return getConfigSetQueue(zkClient, new Stats());
}
/**
* Get queue that can be used to read configset API tasks to the Overseer.
- * <p>
- * This queue is used internally by the {@link OverseerConfigSetMessageHandler} to read configset API
- * tasks submitted by the {@link org.apache.solr.handler.admin.ConfigSetsHandler}. The actions supported
- * by this queue are listed in the {@link org.apache.solr.common.params.ConfigSetParams.ConfigSetAction}
- * enum.
- * <p>
- * Performance statistics on the returned queue are tracked by the Overseer Stats API,
- * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
- * <p>
- * For now, this internally returns the same queue as {@link #getCollectionQueue(SolrZkClient, Stats)}.
- * It is the responsibility of the client to ensure that configset API actions are prefixed with
- * {@link OverseerConfigSetMessageHandler#CONFIGSETS_ACTION_PREFIX} so that it is processed by
- * {@link OverseerConfigSetMessageHandler}.
+ *
+ * <p>This queue is used internally by the {@link OverseerConfigSetMessageHandler} to read
+ * configset API tasks submitted by the {@link org.apache.solr.handler.admin.ConfigSetsHandler}.
+ * The actions supported by this queue are listed in the {@link
+ * org.apache.solr.common.params.ConfigSetParams.ConfigSetAction} enum.
+ *
+ * <p>Performance statistics on the returned queue are tracked by the Overseer Stats API, see
+ * {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
+ *
+ * <p>For now, this internally returns the same queue as {@link #getCollectionQueue(SolrZkClient,
+ * Stats)}. It is the responsibility of the client to ensure that configset API actions are
+ * prefixed with {@link OverseerConfigSetMessageHandler#CONFIGSETS_ACTION_PREFIX} so that it is
+ * processed by {@link OverseerConfigSetMessageHandler}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
@@ -1031,13 +1155,12 @@ public class Overseer implements SolrCloseable {
// that the actions are prefixed with a unique string.
return getCollectionQueue(zkClient, zkStats);
}
-
private void createOverseerNode(final SolrZkClient zkClient) {
try {
zkClient.create("/overseer", new byte[0], CreateMode.PERSISTENT, true);
} catch (KeeperException.NodeExistsException e) {
- //ok
+ // ok
} catch (InterruptedException e) {
log.error("Could not create Overseer node", e);
Thread.currentThread().interrupt();
@@ -1047,19 +1170,25 @@ public class Overseer implements SolrCloseable {
throw new RuntimeException(e);
}
}
-
+
public ZkStateReader getZkStateReader() {
return reader;
}
public void offerStateUpdate(byte[] data) throws KeeperException, InterruptedException {
- // When cluster state update is distributed, the Overseer cluster state update queue should only ever receive QUIT messages.
- // These go to sendQuitToOverseer for execution path clarity.
+ // When cluster state update is distributed, the Overseer cluster state update queue should only
+ // ever receive QUIT messages. These go to sendQuitToOverseer for execution path clarity.
if (distributedClusterStateUpdater.isDistributedStateUpdate()) {
final ZkNodeProps message = ZkNodeProps.load(data);
final String operation = message.getStr(QUEUE_OPERATION);
- log.error("Received unexpected message on Overseer cluster state updater for " + operation + " when distributed updates are configured"); // nowarn
- throw new RuntimeException("Message " + operation + " offered to state update queue when distributed state update is configured.");
+ log.error(
+ "Received unexpected message on Overseer cluster state updater for "
+ + operation
+ + " when distributed updates are configured"); // nowarn
+ throw new RuntimeException(
+ "Message "
+ + operation
+ + " offered to state update queue when distributed state update is configured.");
}
if (zkController.getZkClient().isClosed()) {
throw new AlreadyClosedException();
@@ -1068,8 +1197,8 @@ public class Overseer implements SolrCloseable {
}
/**
- * Submit an intra-process message which will be picked up and executed when {@link ClusterStateUpdater}'s
- * loop runs next time
+ * Submit an intra-process message which will be picked up and executed when {@link
+ * ClusterStateUpdater}'s loop runs next time
*/
public void submit(Message message) {
unprocessedMessages.add(message);
@@ -1080,13 +1209,17 @@ public class Overseer implements SolrCloseable {
}
/**
- * This method enqueues a QUIT message to the overseer of given id.
- * Effect is similar to building the message then calling {@link #offerStateUpdate} but this method can legitimately be called
- * when cluster state update is distributed (and Overseer cluster state updater not really used) while {@link #offerStateUpdate} is not.
- * Note that sending "QUIT" to overseer is not a cluster state update and was likely added to this queue because it was simpler.
+ * This method enqueues a QUIT message to the overseer with the given id. The effect is similar to building
+ * the message then calling {@link #offerStateUpdate} but this method can legitimately be called
+ * when cluster state update is distributed (and Overseer cluster state updater not really used)
+ * while {@link #offerStateUpdate} is not. Note that sending "QUIT" to overseer is not a cluster
+ * state update and was likely added to this queue because it was simpler.
*/
- public void sendQuitToOverseer(String overseerId) throws KeeperException, InterruptedException {
- getOverseerQuitNotificationQueue().offer(
- Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower(), ID, overseerId)));
+ public void sendQuitToOverseer(String overseerId) throws KeeperException, InterruptedException {
+ getOverseerQuitNotificationQueue()
+ .offer(
+ Utils.toJSON(
+ new ZkNodeProps(
+ Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower(), ID, overseerId)));
}
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
index 1724f6c..b93f9b1 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
@@ -19,7 +19,6 @@ package org.apache.solr.cloud;
import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.CONFIGSETS_ACTION_PREFIX;
import java.io.IOException;
-
import org.apache.commons.io.IOUtils;
import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -29,16 +28,20 @@ import org.apache.solr.handler.component.HttpShardHandlerFactory;
import org.apache.solr.metrics.SolrMetricsContext;
/**
- * An {@link OverseerTaskProcessor} that handles:
- * 1) collection-related Overseer messages
- * 2) configset-related Overseer messages
+ * An {@link OverseerTaskProcessor} that handles: 1) collection-related Overseer messages, and 2)
+ * configset-related Overseer messages.
*/
public class OverseerCollectionConfigSetProcessor extends OverseerTaskProcessor {
- public OverseerCollectionConfigSetProcessor(ZkStateReader zkStateReader, String myId,
- final HttpShardHandler shardHandler,
- String adminPath, Stats stats, Overseer overseer,
- OverseerNodePrioritizer overseerNodePrioritizer, SolrMetricsContext solrMetricsContext) {
+ public OverseerCollectionConfigSetProcessor(
+ ZkStateReader zkStateReader,
+ String myId,
+ final HttpShardHandler shardHandler,
+ String adminPath,
+ Stats stats,
+ Overseer overseer,
+ OverseerNodePrioritizer overseerNodePrioritizer,
+ SolrMetricsContext solrMetricsContext) {
this(
zkStateReader,
myId,
@@ -51,26 +54,34 @@ public class OverseerCollectionConfigSetProcessor extends OverseerTaskProcessor
Overseer.getRunningMap(zkStateReader.getZkClient()),
Overseer.getCompletedMap(zkStateReader.getZkClient()),
Overseer.getFailureMap(zkStateReader.getZkClient()),
- solrMetricsContext
- );
+ solrMetricsContext);
}
- protected OverseerCollectionConfigSetProcessor(ZkStateReader zkStateReader, String myId,
- final HttpShardHandlerFactory shardHandlerFactory,
- String adminPath,
- Stats stats,
- Overseer overseer,
- OverseerNodePrioritizer overseerNodePrioritizer,
- OverseerTaskQueue workQueue,
- DistributedMap runningMap,
- DistributedMap completedMap,
- DistributedMap failureMap, SolrMetricsContext solrMetricsContext) {
+ protected OverseerCollectionConfigSetProcessor(
+ ZkStateReader zkStateReader,
+ String myId,
+ final HttpShardHandlerFactory shardHandlerFactory,
+ String adminPath,
+ Stats stats,
+ Overseer overseer,
+ OverseerNodePrioritizer overseerNodePrioritizer,
+ OverseerTaskQueue workQueue,
+ DistributedMap runningMap,
+ DistributedMap completedMap,
+ DistributedMap failureMap,
+ SolrMetricsContext solrMetricsContext) {
super(
zkStateReader,
myId,
stats,
- getOverseerMessageHandlerSelector(zkStateReader, myId, shardHandlerFactory,
- adminPath, stats, overseer, overseerNodePrioritizer),
+ getOverseerMessageHandlerSelector(
+ zkStateReader,
+ myId,
+ shardHandlerFactory,
+ adminPath,
+ stats,
+ overseer,
+ overseerNodePrioritizer),
overseerNodePrioritizer,
workQueue,
runningMap,
@@ -87,10 +98,18 @@ public class OverseerCollectionConfigSetProcessor extends OverseerTaskProcessor
Stats stats,
Overseer overseer,
OverseerNodePrioritizer overseerNodePrioritizer) {
- final OverseerCollectionMessageHandler collMessageHandler = new OverseerCollectionMessageHandler(
- zkStateReader, myId, shardHandlerFactory, adminPath, stats, overseer, overseerNodePrioritizer);
- final OverseerConfigSetMessageHandler configMessageHandler = new OverseerConfigSetMessageHandler(
- zkStateReader, overseer.getCoreContainer()); //coreContainer is passed instead of configSetService as configSetService is loaded late
+ final OverseerCollectionMessageHandler collMessageHandler =
+ new OverseerCollectionMessageHandler(
+ zkStateReader,
+ myId,
+ shardHandlerFactory,
+ adminPath,
+ stats,
+ overseer,
+ overseerNodePrioritizer);
+ // coreContainer is passed instead of configSetService as configSetService is loaded late
+ final OverseerConfigSetMessageHandler configMessageHandler =
+ new OverseerConfigSetMessageHandler(zkStateReader, overseer.getCoreContainer());
return new OverseerMessageHandlerSelector() {
@Override
public void close() throws IOException {
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java
index e0ce58c..6a01ceb 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java
@@ -16,10 +16,11 @@
*/
package org.apache.solr.cloud;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
import java.lang.invoke.MethodHandles;
import java.util.HashSet;
import java.util.Set;
-
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -31,17 +32,10 @@ import org.apache.solr.core.CoreContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- * A {@link OverseerMessageHandler} that handles ConfigSets API related
- * overseer messages.
- */
+/** A {@link OverseerMessageHandler} that handles ConfigSets API related overseer messages. */
public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
- /**
- * Prefix to specify an action should be handled by this handler.
- */
+ /** Prefix to specify an action should be handled by this handler. */
public static final String CONFIGSETS_ACTION_PREFIX = "configsets:";
private ZkStateReader zkStateReader;
@@ -54,8 +48,8 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
// in this way, we prevent a Base ConfigSet from being deleted while it is being copied
// but don't prevent different ConfigSets from being created with the same Base ConfigSet
// at the same time.
- final private Set<String> configSetWriteWip;
- final private Set<String> configSetReadWip;
+ private final Set<String> configSetWriteWip;
+ private final Set<String> configSetReadWip;
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -71,9 +65,12 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
NamedList<Object> results = new NamedList<>();
try {
if (!operation.startsWith(CONFIGSETS_ACTION_PREFIX)) {
- throw new SolrException(ErrorCode.BAD_REQUEST,
- "Operation does not contain proper prefix: " + operation
- + " expected: " + CONFIGSETS_ACTION_PREFIX);
+ throw new SolrException(
+ ErrorCode.BAD_REQUEST,
+ "Operation does not contain proper prefix: "
+ + operation
+ + " expected: "
+ + CONFIGSETS_ACTION_PREFIX);
}
operation = operation.substring(CONFIGSETS_ACTION_PREFIX.length());
log.info("OverseerConfigSetMessageHandler.processMessage : {}, {}", operation, message);
@@ -90,8 +87,7 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
ConfigSetCmds.deleteConfigSet(message, coreContainer);
break;
default:
- throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:"
- + operation);
+ throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:" + operation);
}
} catch (Exception e) {
String configSetName = message.getStr(NAME);
@@ -99,8 +95,8 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
if (configSetName == null) {
SolrException.log(log, "Operation " + operation + " failed", e);
} else {
- SolrException.log(log, "ConfigSet: " + configSetName + " operation: " + operation
- + " failed", e);
+ SolrException.log(
+ log, "ConfigSet: " + configSetName + " operation: " + operation + " failed", e);
}
results.add("Operation " + operation + " caused exception:", e);
@@ -137,7 +133,6 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
return message.getStr(NAME);
}
-
private void markExclusiveTask(String configSetName, ZkNodeProps message) {
String baseConfigSet = getBaseConfigSetIfCreate(message);
markExclusive(configSetName, baseConfigSet);
@@ -162,7 +157,6 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
}
}
-
private boolean canExecute(String configSetName, ZkNodeProps message) {
String baseConfigSetName = getBaseConfigSetIfCreate(message);
@@ -181,14 +175,10 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
return true;
}
-
private String getBaseConfigSetIfCreate(ZkNodeProps message) {
- String operation = message.getStr(Overseer.QUEUE_OPERATION).substring(CONFIGSETS_ACTION_PREFIX.length());
+ String operation =
+ message.getStr(Overseer.QUEUE_OPERATION).substring(CONFIGSETS_ACTION_PREFIX.length());
ConfigSetParams.ConfigSetAction action = ConfigSetParams.ConfigSetAction.get(operation);
return ConfigSetCmds.getBaseConfigSetName(action, message.getStr(ConfigSetCmds.BASE_CONFIGSET));
}
-
-
-
-
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java
index e25befa..d9a1687 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java
@@ -17,6 +17,8 @@
package org.apache.solr.cloud;
+import static org.apache.solr.common.params.CommonParams.ID;
+
import java.lang.invoke.MethodHandles;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
@@ -29,20 +31,20 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.params.CommonParams.ID;
-
final class OverseerElectionContext extends ElectionContext {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final SolrZkClient zkClient;
private final Overseer overseer;
private volatile boolean isClosed = false;
- public OverseerElectionContext(SolrZkClient zkClient, Overseer overseer, final String zkNodeName) {
+ public OverseerElectionContext(
+ SolrZkClient zkClient, Overseer overseer, final String zkNodeName) {
super(zkNodeName, Overseer.OVERSEER_ELECT, Overseer.OVERSEER_ELECT + "/leader", null, zkClient);
this.overseer = overseer;
this.zkClient = zkClient;
try {
- new ZkCmdExecutor(zkClient.getZkClientTimeout()).ensureExists(Overseer.OVERSEER_ELECT, zkClient);
+ new ZkCmdExecutor(zkClient.getZkClientTimeout())
+ .ensureExists(Overseer.OVERSEER_ELECT, zkClient);
} catch (KeeperException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, e);
} catch (InterruptedException e) {
@@ -52,18 +54,16 @@ final class OverseerElectionContext extends ElectionContext {
}
@Override
- void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs) throws KeeperException,
- InterruptedException {
+ void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs)
+ throws KeeperException, InterruptedException {
if (isClosed) {
return;
}
log.info("I am going to be the leader {}", id);
- final String id = leaderSeqPath
- .substring(leaderSeqPath.lastIndexOf("/") + 1);
+ final String id = leaderSeqPath.substring(leaderSeqPath.lastIndexOf("/") + 1);
ZkNodeProps myProps = new ZkNodeProps(ID, id);
- zkClient.makePath(leaderPath, Utils.toJSON(myProps),
- CreateMode.EPHEMERAL, true);
+ zkClient.makePath(leaderPath, Utils.toJSON(myProps), CreateMode.EPHEMERAL, true);
if (pauseBeforeStartMs > 0) {
try {
Thread.sleep(pauseBeforeStartMs);
@@ -106,5 +106,4 @@ final class OverseerElectionContext extends ElectionContext {
// leader changed - close the overseer
overseer.close();
}
-
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java
index f4601ae..3e369b9 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java
@@ -18,15 +18,12 @@ package org.apache.solr.cloud;
import org.apache.solr.common.cloud.ZkNodeProps;
-/**
- * Interface for processing messages received by an {@link OverseerTaskProcessor}
- */
+/** Interface for processing messages received by an {@link OverseerTaskProcessor} */
public interface OverseerMessageHandler {
/**
* @param message the message to process
* @param operation the operation to process
- *
* @return response
*/
OverseerSolrResponse processMessage(ZkNodeProps message, String operation);
@@ -38,7 +35,6 @@ public interface OverseerMessageHandler {
/**
* @param operation the operation to be timed
- *
* @return the name of the timer to use for the operation
*/
String getTimerName(String operation);
@@ -49,15 +45,14 @@ public interface OverseerMessageHandler {
/**
* Grabs an exclusive lock for this particular task.
+ *
* @return <code>null</code> if locking is not possible.
*/
Lock lockTask(ZkNodeProps message, long batchSessionId);
/**
* @param message the message being processed
- *
* @return the taskKey for the message for handling task exclusivity
*/
String getTaskKey(ZkNodeProps message);
-
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java b/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java
index 2a02562..870372e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java
@@ -20,7 +20,6 @@ import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
-
import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
@@ -38,8 +37,8 @@ import org.slf4j.Logger;
... 268666 lines suppressed ...