You are viewing a plain-text version of this content; the canonical link to it was provided in the original HTML message and is not preserved in this rendering.
Posted to commits@ambari.apache.org by sm...@apache.org on 2014/01/13 23:09:03 UTC
[6/7] AMBARI-4270. Add decommission support for TaskTracker and
modify support for DataNode to match
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 0485c4a..d1ad1a9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -17,13 +17,9 @@
*/
package org.apache.ambari.server.controller.internal;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
import org.apache.ambari.server.controller.AmbariManagementController;
@@ -42,10 +38,12 @@ import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
/**
* Resource provider for host component resources.
@@ -72,15 +70,15 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
protected static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID
= PropertyHelper.getPropertyId("HostRoles", "desired_stack_id");
protected static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
+ = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
protected static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
- = PropertyHelper.getPropertyId("HostRoles", "stale_configs");
-
+ = PropertyHelper.getPropertyId("HostRoles", "stale_configs");
+ protected static final String HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID
+ = PropertyHelper.getPropertyId("HostRoles", "admin_state");
//Component name mappings
private static final Map<String, PropertyProvider> HOST_COMPONENT_PROPERTIES_PROVIDER = new HashMap<String, PropertyProvider>();
-
- private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_CONNECT_TIMEOUT = 1500;
- private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_READ_TIMEOUT = 10000;
+ private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_CONNECT_TIMEOUT = 1500; //milliseconds
+ private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_READ_TIMEOUT = 10000; //milliseconds
static {
ComponentSSLConfiguration configuration = ComponentSSLConfiguration.instance();
@@ -101,7 +99,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
//Parameters from the predicate
private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID =
"params/run_smoke_test";
-
private static Set<String> pkPropertyIds =
new HashSet<String>(Arrays.asList(new String[]{
HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
@@ -114,14 +111,14 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
/**
* Create a new resource provider for the given management controller.
*
- * @param propertyIds the property ids
- * @param keyPropertyIds the key property ids
- * @param managementController the management controller
+ * @param propertyIds the property ids
+ * @param keyPropertyIds the key property ids
+ * @param managementController the management controller
*/
@AssistedInject
public HostComponentResourceProvider(@Assisted Set<String> propertyIds,
- @Assisted Map<Resource.Type, String> keyPropertyIds,
- @Assisted AmbariManagementController managementController) {
+ @Assisted Map<Resource.Type, String> keyPropertyIds,
+ @Assisted AmbariManagementController managementController) {
super(propertyIds, keyPropertyIds, managementController);
}
@@ -130,9 +127,9 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
@Override
public RequestStatus createResources(Request request)
throws SystemException,
- UnsupportedPropertyException,
- ResourceAlreadyExistsException,
- NoSuchParentResourceException {
+ UnsupportedPropertyException,
+ ResourceAlreadyExistsException,
+ NoSuchParentResourceException {
final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
for (Map<String, Object> propertyMap : request.getProperties()) {
@@ -163,8 +160,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
requests.add(getRequest(propertyMap));
}
- Set<Resource> resources = new HashSet<Resource>();
- Set<String> requestedIds = getRequestPropertyIds(request, predicate);
+ Set<Resource> resources = new HashSet<Resource>();
+ Set<String> requestedIds = getRequestPropertyIds(request, predicate);
Set<ServiceComponentHostResponse> responses = getResources(new Command<Set<ServiceComponentHostResponse>>() {
@Override
@@ -172,15 +169,21 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
return getManagementController().getHostComponents(requests);
}
});
-
+
for (ServiceComponentHostResponse response : responses) {
Resource resource = new ResourceImpl(Resource.Type.HostComponent);
- setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, response.getClusterName(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID, response.getHostname(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID, response.getLiveState(), requestedIds);
- setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, response.getDesiredState(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
+ response.getClusterName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
+ response.getServiceName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
+ response.getComponentName(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
+ response.getHostname(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID,
+ response.getLiveState(), requestedIds);
+ setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID,
+ response.getDesiredState(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_STACK_ID_PROPERTY_ID,
response.getStackVersion(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID,
@@ -189,15 +192,19 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
response.getActualConfigs(), requestedIds);
setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID,
Boolean.valueOf(response.isStaleConfig()), requestedIds);
-
- String componentName = (String)resource.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+ if (response.getAdminState() != null) {
+ setResourceProperty(resource, HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID,
+ response.getAdminState(), requestedIds);
+ }
+
+ String componentName = (String) resource.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
PropertyProvider propertyProvider = HOST_COMPONENT_PROPERTIES_PROVIDER.get(componentName);
if (propertyProvider != null) {
Set<Resource> resourcesToPopulate = new HashSet<Resource>();
resourcesToPopulate.add(resource);
propertyProvider.populateResources(resourcesToPopulate, request, predicate);
}
-
+
resources.add(resource);
}
return resources;
@@ -205,14 +212,14 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
@Override
public RequestStatus updateResources(final Request request, Predicate predicate)
- throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+ throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
RequestStatusResponse response = null;
final boolean runSmokeTest = "true".equals(getQueryParameterValue(
QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate)) ? true : false;
- Iterator<Map<String,Object>> iterator = request.getProperties().iterator();
+ Iterator<Map<String, Object>> iterator = request.getProperties().iterator();
if (iterator.hasNext()) {
for (Map<String, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
requests.add(getRequest(propertyMap));
@@ -279,8 +286,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
/**
* Get a component request object from a map of property values.
*
- * @param properties the predicate
- *
+ * @param properties the predicate
* @return the component request object
*/
private ServiceComponentHostRequest getRequest(Map<String, Object> properties) {
@@ -294,7 +300,11 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
(String) properties.get(HOST_COMPONENT_STACK_ID_PROPERTY_ID));
if (properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID) != null) {
serviceComponentHostRequest.setStaleConfig(
- properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString());
+ properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString().toLowerCase());
+ }
+ if (properties.get(HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID) != null) {
+ serviceComponentHostRequest.setAdminState(
+ properties.get(HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID).toString());
}
return serviceComponentHostRequest;
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
index ea97524..93226c9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
@@ -50,7 +50,7 @@ public class ActionMetadata {
private void fillServiceClients() {
serviceClients.put("hdfs" , Role.HDFS_CLIENT.toString());
- serviceClients.put("glusterfs" , Role.GLUSTERFS_CLIENT.toString());
+ serviceClients.put("glusterfs" , Role.GLUSTERFS_CLIENT.toString());
serviceClients.put("hbase" , Role.HBASE_CLIENT.toString());
serviceClients.put("mapreduce" , Role.MAPREDUCE_CLIENT.toString());
serviceClients.put("zookeeper" , Role.ZOOKEEPER_CLIENT.toString());
@@ -62,9 +62,8 @@ public class ActionMetadata {
}
private void fillServiceActions() {
- serviceActions.put("hdfs" , Arrays.asList(Role.HDFS_SERVICE_CHECK.toString(),
- Role.DECOMMISSION_DATANODE.toString()));
- serviceActions.put("glusterfs" , Arrays.asList(Role.GLUSTERFS_SERVICE_CHECK.toString()));
+ serviceActions.put("hdfs" , Arrays.asList(Role.HDFS_SERVICE_CHECK.toString()));
+ serviceActions.put("glusterfs" , Arrays.asList(Role.GLUSTERFS_SERVICE_CHECK.toString()));
serviceActions.put("hbase" , Arrays.asList(Role.HBASE_SERVICE_CHECK.toString()));
serviceActions.put("mapreduce" , Arrays.asList(Role.MAPREDUCE_SERVICE_CHECK.toString()));
serviceActions.put("mapreduce2" , Arrays.asList(Role.MAPREDUCE2_SERVICE_CHECK.toString()));
@@ -75,9 +74,9 @@ public class ActionMetadata {
serviceActions.put("oozie" , Arrays.asList(Role.OOZIE_SERVICE_CHECK.toString()));
serviceActions.put("pig" , Arrays.asList(Role.PIG_SERVICE_CHECK.toString()));
serviceActions.put("sqoop" , Arrays.asList(Role.SQOOP_SERVICE_CHECK.toString()));
- serviceActions.put("webhcat" , Arrays.asList(Role.WEBHCAT_SERVICE_CHECK.toString()));
- serviceActions.put("storm" , Arrays.asList(Role.STORM_SERVICE_CHECK.toString()));
- serviceActions.put("falcon" , Arrays.asList(Role.FALCON_SERVICE_CHECK.toString()));
+ serviceActions.put("webhcat" , Arrays.asList(Role.WEBHCAT_SERVICE_CHECK.toString()));
+ serviceActions.put("storm" , Arrays.asList(Role.STORM_SERVICE_CHECK.toString()));
+ serviceActions.put("falcon" , Arrays.asList(Role.FALCON_SERVICE_CHECK.toString()));
}
private void fillServiceCheckActions() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
index 615db27..7f04c51 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
@@ -17,10 +17,9 @@
*/
package org.apache.ambari.server.orm.entities;
-import java.util.Collection;
-
import javax.persistence.*;
+import org.apache.ambari.server.state.HostComponentAdminState;
import org.apache.ambari.server.state.State;
import static org.apache.commons.lang.StringUtils.defaultString;
@@ -55,6 +54,10 @@ public class HostComponentDesiredStateEntity {
@Column(name = "desired_stack_version", insertable = true, updatable = true)
private String desiredStackVersion = "";
+ @Enumerated(value = EnumType.STRING)
+ @Column(name = "admin_state", nullable = true, insertable = true, updatable = true)
+ private HostComponentAdminState adminState;
+
@ManyToOne(cascade = CascadeType.PERSIST)
@JoinColumns({
@JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
@@ -114,6 +117,15 @@ public class HostComponentDesiredStateEntity {
this.desiredStackVersion = desiredStackVersion;
}
+
+ public HostComponentAdminState getAdminState() {
+ return adminState;
+ }
+
+ public void setAdminState(HostComponentAdminState attribute) {
+ this.adminState = attribute;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java b/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java
new file mode 100644
index 0000000..fa3a451
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.state;
+
+public enum HostComponentAdminState {
+ /**
+ * The host component is in service (not decommissioned).
+ */
+ INSERVICE,
+ /**
+ * The host component is decommissioned.
+ */
+ DECOMMISSIONED,
+ /**
+ * The host component is being decommissioned.
+ */
+ DECOMMISSIONING
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index a982ef1..65c37f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -68,6 +68,8 @@ public interface ServiceComponent {
public boolean isClientComponent();
+ public boolean isMasterComponent();
+
public boolean canBeRemoved();
public void deleteAllServiceComponentHosts() throws AmbariException;
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
index b940503..4ce632a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
@@ -81,6 +81,10 @@ public interface ServiceComponentHost {
public void setStackVersion(StackId stackVersion);
+ public HostComponentAdminState getComponentAdminState();
+
+ public void setComponentAdminState(HostComponentAdminState attribute);
+
public ServiceComponentHostResponse convertToResponse();
boolean isPersisted();
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
index d535fd9..e0716c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
@@ -23,8 +23,7 @@ import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
public interface ServiceComponentHostFactory {
- ServiceComponentHost createNew(ServiceComponent serviceComponent,
- String hostName, boolean isClient);
+ ServiceComponentHost createNew(ServiceComponent serviceComponent, String hostName);
ServiceComponentHost createExisting(ServiceComponent serviceComponent,
HostComponentStateEntity stateEntity,
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index 5deb0a7..9226cf1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -18,11 +18,6 @@
package org.apache.ambari.server.state;
-import java.util.*;
-import java.util.Map.Entry;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Injector;
@@ -33,21 +28,37 @@ import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ServiceComponentHostNotFoundException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
+import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityPK;
import org.apache.ambari.server.state.cluster.ClusterImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
public class ServiceComponentImpl implements ServiceComponent {
private final static Logger LOG =
LoggerFactory.getLogger(ServiceComponentImpl.class);
-
private final Service service;
private final ReadWriteLock clusterGlobalLock;
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
-
+ private final boolean isClientComponent;
+ private final boolean isMasterComponent;
+ boolean persisted = false;
@Inject
private Gson gson;
@Inject
@@ -60,22 +71,12 @@ public class ServiceComponentImpl implements ServiceComponent {
private ServiceComponentHostFactory serviceComponentHostFactory;
@Inject
private AmbariMetaInfo ambariMetaInfo;
-
- boolean persisted = false;
private ServiceComponentDesiredStateEntity desiredStateEntity;
-
private Map<String, ServiceComponentHost> hostComponents;
- private final boolean isClientComponent;
-
- private void init() {
- // TODO load during restart
- // initialize from DB
- }
-
@AssistedInject
public ServiceComponentImpl(@Assisted Service service,
- @Assisted String componentName, Injector injector) throws AmbariException {
+ @Assisted String componentName, Injector injector) throws AmbariException {
injector.injectMembers(this);
this.clusterGlobalLock = service.getClusterGlobalLock();
this.service = service;
@@ -100,6 +101,7 @@ public class ServiceComponentImpl implements ServiceComponent {
+ ", stackInfo=" + stackId.getStackId());
}
this.isClientComponent = compInfo.isClient();
+ this.isMasterComponent = compInfo.isMaster();
init();
}
@@ -141,10 +143,16 @@ public class ServiceComponentImpl implements ServiceComponent {
+ ", stackInfo=" + stackId.getStackId());
}
this.isClientComponent = compInfo.isClient();
+ this.isMasterComponent = compInfo.isMaster();
persisted = true;
}
+ private void init() {
+ // TODO load during restart
+ // initialize from DB
+ }
+
@Override
public ReadWriteLock getClusterGlobalLock() {
return clusterGlobalLock;
@@ -163,8 +171,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -180,8 +186,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -197,13 +201,11 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
public Map<String, ServiceComponentHost>
- getServiceComponentHosts() {
+ getServiceComponentHosts() {
clusterGlobalLock.readLock().lock();
try {
readWriteLock.readLock().lock();
@@ -215,8 +217,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -243,8 +243,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.writeLock().unlock();
}
-
-
}
@Override
@@ -282,8 +280,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.writeLock().unlock();
}
-
-
}
@Override
@@ -311,8 +307,7 @@ public class ServiceComponentImpl implements ServiceComponent {
+ ", serviceComponentName=" + getName()
+ ", hostname=" + hostName);
}
- ServiceComponentHost hostComponent =
- serviceComponentHostFactory.createNew(this, hostName, this.isClientComponent());
+ ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
// FIXME need a better approach of caching components by host
ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
clusterImpl.addServiceComponentHost(hostComponent);
@@ -326,13 +321,11 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.writeLock().unlock();
}
-
-
}
@Override
public ServiceComponentHost getServiceComponentHost(String hostname)
- throws AmbariException {
+ throws AmbariException {
clusterGlobalLock.readLock().lock();
try {
readWriteLock.readLock().lock();
@@ -348,8 +341,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -365,8 +356,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -407,8 +396,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -434,8 +421,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -456,8 +441,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -473,8 +456,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -507,8 +488,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -524,8 +503,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -548,8 +525,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Transactional
@@ -586,8 +561,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Transactional
@@ -605,8 +578,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -615,6 +586,11 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
+ public boolean isMasterComponent() {
+ return this.isMasterComponent;
+ }
+
+ @Override
public boolean canBeRemoved() {
clusterGlobalLock.readLock().lock();
try {
@@ -642,8 +618,6 @@ public class ServiceComponentImpl implements ServiceComponent {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index b922293..d9135c0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -18,7 +18,6 @@
package org.apache.ambari.server.state.svccomphost;
-import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
@@ -43,6 +42,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
import org.apache.ambari.server.state.HostConfig;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
@@ -589,10 +589,10 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@AssistedInject
public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent,
- @Assisted String hostName, @Assisted boolean isClient, Injector injector) {
+ @Assisted String hostName, Injector injector) {
injector.injectMembers(this);
- if (isClient) {
+ if (serviceComponent.isClientComponent()) {
this.stateMachine = clientStateMachineFactory.make(this);
} else {
this.stateMachine = daemonStateMachineFactory.make(this);
@@ -617,6 +617,11 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
desiredStateEntity.setDesiredState(State.INIT);
desiredStateEntity.setDesiredStackVersion(
gson.toJson(serviceComponent.getDesiredStackVersion()));
+ if(!serviceComponent.isMasterComponent() && !serviceComponent.isClientComponent()) {
+ desiredStateEntity.setAdminState(HostComponentAdminState.INSERVICE);
+ } else {
+ desiredStateEntity.setAdminState(null);
+ }
try {
this.host = clusters.getHost(hostName);
@@ -640,6 +645,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
this.desiredStateEntity = desiredStateEntity;
this.stateEntity = stateEntity;
+
//TODO implement State Machine init as now type choosing is hardcoded in above code
if (serviceComponent.isClientComponent()) {
this.stateMachine = clientStateMachineFactory.make(this);
@@ -755,8 +761,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -772,8 +776,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
/**
@@ -791,7 +793,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
/**
@@ -809,7 +810,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
/**
@@ -827,7 +827,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
/**
@@ -845,7 +844,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
/**
@@ -863,7 +861,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
/**
@@ -881,7 +878,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
@Override
@@ -897,8 +893,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -914,8 +908,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
-
}
@Override
@@ -931,7 +923,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
@Override
@@ -948,10 +939,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
-
@Override
public State getDesiredState() {
clusterGlobalLock.readLock().lock();
@@ -965,7 +954,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
@Override
@@ -997,7 +985,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
@Override
@@ -1014,7 +1001,42 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
+ }
+
+ @Override
+ public HostComponentAdminState getComponentAdminState() {
+ clusterGlobalLock.readLock().lock();
+ try {
+ readLock.lock();
+ try {
+ HostComponentAdminState adminState = desiredStateEntity.getAdminState();
+ if (adminState == null
+ && !serviceComponent.isClientComponent() && !serviceComponent.isMasterComponent()) {
+ adminState = HostComponentAdminState.INSERVICE;
+ }
+ return adminState;
+ } finally {
+ readLock.unlock();
+ }
+ } finally {
+ clusterGlobalLock.readLock().unlock();
+ }
+ }
+ @Override
+ public void setComponentAdminState(HostComponentAdminState attribute) {
+ clusterGlobalLock.readLock().lock();
+ try {
+ writeLock.lock();
+ try {
+ desiredStateEntity.setAdminState(attribute);
+ saveIfPersisted();
+ } finally {
+ writeLock.unlock();
+ }
+ } finally {
+ clusterGlobalLock.readLock().unlock();
+ }
}
@Override
@@ -1031,7 +1053,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
getState().toString(),
getStackVersion().getStackId(),
getDesiredState().toString(),
- getDesiredStackVersion().getStackId());
+ getDesiredStackVersion().getStackId(),
+ getComponentAdminState());
r.setActualConfigs(actualConfigs);
@@ -1040,7 +1063,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} catch (Exception e) {
LOG.error("Could not determine stale config", e);
}
-
+
return r;
} finally {
readLock.unlock();
@@ -1048,7 +1071,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
} finally {
clusterGlobalLock.readLock().unlock();
}
-
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 1c0a66c..6f7f465 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -17,26 +17,6 @@
*/
package org.apache.ambari.server.utils;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import javax.xml.bind.JAXBException;
-
import com.google.common.base.Joiner;
import com.google.gson.Gson;
import org.apache.ambari.server.AmbariException;
@@ -44,12 +24,12 @@ import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.actionmanager.Stage;
import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.HostsMap;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -59,33 +39,44 @@ import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
-public class StageUtils {
-
+import javax.xml.bind.JAXBException;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
- private static final Log LOG = LogFactory.getLog(StageUtils.class);
+public class StageUtils {
- private static Map<String, String> componentToClusterInfoKeyMap =
- new HashMap<String, String>();
- private volatile static Gson gson;
public static final Integer DEFAULT_PING_PORT = 8670;
-
+ private static final Log LOG = LogFactory.getLog(StageUtils.class);
private static final String HOSTS_LIST = "all_hosts";
-
private static final String PORTS = "all_ping_ports";
-
- public static void setGson(Gson gson) {
- if (gson==null) {
- StageUtils.gson = gson;
- }
- }
+ private static Map<String, String> componentToClusterInfoKeyMap =
+ new HashMap<String, String>();
+ private static Map<String, String> decommissionedToClusterInfoKeyMap =
+ new HashMap<String, String>();
+ private volatile static Gson gson;
public static Gson getGson() {
if (gson != null) {
return gson;
} else {
synchronized (LOG) {
- if (gson==null) {
+ if (gson == null) {
gson = new Gson();
}
return gson;
@@ -93,6 +84,11 @@ public class StageUtils {
}
}
+ public static void setGson(Gson gson) {
+ if (gson == null) {
+ StageUtils.gson = gson;
+ }
+ }
static {
componentToClusterInfoKeyMap.put("NAMENODE", "namenode_host");
@@ -121,6 +117,13 @@ public class StageUtils {
componentToClusterInfoKeyMap.put("KERBEROS_ADMIN_CLIENT", "kerberos_adminclient_host");
}
+ static {
+ decommissionedToClusterInfoKeyMap.put("DATANODE", "decom_dn_hosts");
+ decommissionedToClusterInfoKeyMap.put("TASKTRACKER", "decom_tt_hosts");
+ decommissionedToClusterInfoKeyMap.put("NODEMANAGER", "decom_nm_hosts");
+ decommissionedToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "decom_hbase_rs_hosts");
+ }
+
public static String getActionId(long requestId, long stageId) {
return requestId + "-" + stageId;
}
@@ -145,7 +148,7 @@ public class StageUtils {
//For testing only
public static Stage getATestStage(long requestId, long stageId, String hostname, String clusterHostInfo) {
-
+
Stage s = new Stage(requestId, "/tmp", "cluster1", "context", clusterHostInfo);
s.setStageId(stageId);
long now = System.currentTimeMillis();
@@ -175,7 +178,7 @@ public class StageUtils {
}
public static String jaxbToString(Object jaxbObj) throws JAXBException,
- JsonGenerationException, JsonMappingException, IOException {
+ JsonGenerationException, JsonMappingException, IOException {
return getGson().toJson(jaxbObj);
}
@@ -196,121 +199,141 @@ public class StageUtils {
return mapper.readValue(is, clazz);
}
-
public static Map<String, Set<String>> getClusterHostInfo(
- Map<String, Host> allHosts, Cluster cluster, HostsMap hostsMap,
- Configuration configuration) throws AmbariException {
+ Map<String, Host> allHosts, Cluster cluster) throws AmbariException {
Map<String, SortedSet<Integer>> hostRolesInfo = new HashMap<String, SortedSet<Integer>>();
-
+
Map<String, Set<String>> clusterHostInfo = new HashMap<String, Set<String>>();
//Fill hosts and ports lists
Set<String> hostsSet = new LinkedHashSet<String>();
List<Integer> portsList = new ArrayList<Integer>();
-
+
for (Host host : allHosts.values()) {
-
+
Integer currentPingPort = host.getCurrentPingPort() == null ?
DEFAULT_PING_PORT : host.getCurrentPingPort();
-
+
hostsSet.add(host.getHostName());
portsList.add(currentPingPort);
}
-
+
List<String> hostsList = new ArrayList<String>(hostsSet);
-
+
//Fill host roles
- for (Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
-
+ for (Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
+
Service service = serviceEntry.getValue();
-
+
for (Entry<String, ServiceComponent> serviceComponentEntry : service.getServiceComponents().entrySet()) {
-
+
ServiceComponent serviceComponent = serviceComponentEntry.getValue();
String componentName = serviceComponent.getName();
-
+
String roleName = componentToClusterInfoKeyMap.get(componentName);
- if (null == roleName && !serviceComponent.isClientComponent())
+ if (null == roleName && !serviceComponent.isClientComponent()) {
roleName = componentName.toLowerCase() + "_hosts";
-
- if (null == roleName)
+ }
+
+ String decomRoleName = decommissionedToClusterInfoKeyMap.get(componentName);
+
+ if (roleName == null && decomRoleName == null) {
continue;
-
+ }
+
for (String hostName : serviceComponent.getServiceComponentHosts().keySet()) {
-
- SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(roleName);
-
- if (hostsForComponentsHost == null) {
- hostsForComponentsHost = new TreeSet<Integer>();
- hostRolesInfo.put(roleName, hostsForComponentsHost);
+
+ if (roleName != null) {
+ SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(roleName);
+
+ if (hostsForComponentsHost == null) {
+ hostsForComponentsHost = new TreeSet<Integer>();
+ hostRolesInfo.put(roleName, hostsForComponentsHost);
+ }
+
+ int hostIndex = hostsList.indexOf(hostName);
+ //Add index of host to current host role
+ hostsForComponentsHost.add(hostIndex);
}
- int hostIndex = hostsList.indexOf(hostName);
- //Add index of host to current host role
- hostsForComponentsHost.add(hostIndex);
+ if (decomRoleName != null) {
+ ServiceComponentHost scHost = serviceComponent.getServiceComponentHost(hostName);
+ if (scHost.getComponentAdminState() == HostComponentAdminState.DECOMMISSIONED) {
+ SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(decomRoleName);
+
+ if (hostsForComponentsHost == null) {
+ hostsForComponentsHost = new TreeSet<Integer>();
+ hostRolesInfo.put(decomRoleName, hostsForComponentsHost);
+ }
+
+ int hostIndex = hostsList.indexOf(hostName);
+ //Add index of host to current host role
+ hostsForComponentsHost.add(hostIndex);
+ }
+ }
}
}
}
-
+
for (Entry<String, SortedSet<Integer>> entry : hostRolesInfo.entrySet()) {
TreeSet<Integer> sortedSet = new TreeSet<Integer>(entry.getValue());
-
+
Set<String> replacedRangesSet = replaceRanges(sortedSet);
-
+
clusterHostInfo.put(entry.getKey(), replacedRangesSet);
}
clusterHostInfo.put(HOSTS_LIST, hostsSet);
clusterHostInfo.put(PORTS, replaceMappedRanges(portsList));
-
+
return clusterHostInfo;
}
-
-
+
/**
* Finds ranges in sorted set and replaces ranges by compact notation
- *
+ * <p/>
* <p>For example, suppose <tt>set</tt> comprises<tt> [1, 2, 3, 4, 7]</tt>.
- * After invoking <tt>rangedSet = StageUtils.replaceRanges(set)</tt>
+ * After invoking <tt>rangedSet = StageUtils.replaceRanges(set)</tt>
* <tt>rangedSet</tt> will comprise
* <tt>["1-4", "7"]</tt>..
*
- * @param set the source set to be ranged
+ * @param set the source set to be ranged
*/
public static Set<String> replaceRanges(SortedSet<Integer> set) {
-
- if (set == null)
+
+ if (set == null) {
return null;
-
+ }
+
Set<String> rangedSet = new HashSet<String>();
-
+
Integer prevElement = null;
Integer startOfRange = set.first();
-
+
for (Integer i : set) {
- if (prevElement != null && (i - prevElement) > 1 ) {
+ if (prevElement != null && (i - prevElement) > 1) {
String rangeItem = getRangedItem(startOfRange, prevElement);
rangedSet.add(rangeItem);
startOfRange = i;
}
prevElement = i;
}
-
+
rangedSet.add(getRangedItem(startOfRange, prevElement));
-
+
return rangedSet;
}
-
+
/**
* Finds ranges in list and replaces ranges by compact notation
- *
+ * <p/>
* <p>For example, suppose <tt>list</tt> comprises<tt> [1, 1, 2, 2, 1, 3]</tt>.
- * After invoking <tt>rangedMappedSet = StageUtils.replaceMappedRanges(list)</tt>
+ * After invoking <tt>rangedMappedSet = StageUtils.replaceMappedRanges(list)</tt>
* <tt>rangedMappedSet</tt> will comprise
* <tt>["1:0-1,4", "2:2-3", "3:5"]</tt>..
*
- * @param list the source list to be ranged
+ * @param values the source list to be ranged
*/
public static Set<String> replaceMappedRanges(List<Integer> values) {
@@ -341,12 +364,12 @@ public class StageUtils {
}
private static String getRangedItem(Integer startOfRange, Integer endOfRange) {
-
+
String separator = (endOfRange - startOfRange) > 1 ? "-" : ",";
-
- String rangeItem = endOfRange.equals(startOfRange) ?
+
+ String rangeItem = endOfRange.equals(startOfRange) ?
endOfRange.toString() :
- startOfRange + separator + endOfRange;
+ startOfRange + separator + endOfRange;
return rangeItem;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index ba6f1d4..812bba0 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -30,7 +30,7 @@ CREATE TABLE clusters (cluster_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT
CREATE TABLE clusterconfig (version_tag VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data LONGTEXT NOT NULL, create_timestamp BIGINT NOT NULL, PRIMARY KEY (version_tag, type_name, cluster_id));
CREATE TABLE clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY (service_name, cluster_id));
CREATE TABLE clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
-CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, admin_state VARCHAR(32), PRIMARY KEY (cluster_id, component_name, host_name, service_name));
CREATE TABLE hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
CREATE TABLE hosts (host_name VARCHAR(255) NOT NULL, cpu_count INTEGER NOT NULL, cpu_info VARCHAR(255) NOT NULL, discovery_status VARCHAR(2000) NOT NULL, host_attributes LONGTEXT NOT NULL, ipv4 VARCHAR(255), ipv6 VARCHAR(255), last_registration_time BIGINT NOT NULL, os_arch VARCHAR(255) NOT NULL, os_info VARCHAR(1000) NOT NULL, os_type VARCHAR(255) NOT NULL, ph_cpu_count INTEGER, public_host_name VARCHAR(255), rack_info VARCHAR(255) NOT NULL, total_mem BIGINT NOT NULL, PRIMARY KEY (host_name));
CREATE TABLE hoststate (agent_version VARCHAR(255) NOT NULL, available_mem BIGINT NOT NULL, current_state VARCHAR(255) NOT NULL, health_status VARCHAR(255), host_name VARCHAR(255) NOT NULL, time_in_state BIGINT NOT NULL, PRIMARY KEY (host_name));
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 4dcd37f..4be3a42 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -20,7 +20,7 @@ CREATE TABLE clusters (cluster_id NUMBER(19) NOT NULL, cluster_info VARCHAR2(255
CREATE TABLE clusterconfig (version_tag VARCHAR2(255) NOT NULL, type_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_data CLOB NOT NULL, create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY (version_tag, type_name, cluster_id));
CREATE TABLE clusterservices (service_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, service_enabled NUMBER(10) NOT NULL, PRIMARY KEY (service_name, cluster_id));
CREATE TABLE clusterstate (cluster_id NUMBER(19) NOT NULL, current_cluster_state VARCHAR2(255) NULL, current_stack_version VARCHAR2(255) NULL, PRIMARY KEY (cluster_id));
-CREATE TABLE hostcomponentdesiredstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, desired_stack_version VARCHAR2(255) NULL, desired_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentdesiredstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, desired_stack_version VARCHAR2(255) NULL, desired_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, admin_state VARCHAR2(32) NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
CREATE TABLE hostcomponentstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, current_stack_version VARCHAR2(255) NOT NULL, current_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
CREATE TABLE hosts (host_name VARCHAR2(255) NOT NULL, cpu_count INTEGER NOT NULL, cpu_info VARCHAR2(255) NULL, discovery_status VARCHAR2(2000) NULL, host_attributes CLOB NULL, ipv4 VARCHAR2(255) NULL, ipv6 VARCHAR2(255) NULL, last_registration_time INTEGER NOT NULL, os_arch VARCHAR2(255) NULL, os_info VARCHAR2(1000) NULL, os_type VARCHAR2(255) NULL, ph_cpu_count INTEGER NOT NULL, public_host_name VARCHAR2(255) NULL, rack_info VARCHAR2(255) NOT NULL, total_mem INTEGER NOT NULL, PRIMARY KEY (host_name));
CREATE TABLE hoststate (agent_version VARCHAR2(255) NULL, available_mem NUMBER(19) NOT NULL, current_state VARCHAR2(255) NOT NULL, health_status VARCHAR2(255) NULL, host_name VARCHAR2(255) NOT NULL, time_in_state NUMBER(19) NOT NULL, PRIMARY KEY (host_name));
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index 2bdb174..ff2b480 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -43,7 +43,7 @@ GRANT ALL PRIVILEGES ON TABLE ambari.clusterservices TO :username;
CREATE TABLE ambari.clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
GRANT ALL PRIVILEGES ON TABLE ambari.clusterstate TO :username;
-CREATE TABLE ambari.hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE ambari.hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, admin_state VARCHAR(32), PRIMARY KEY (cluster_id, component_name, host_name, service_name));
GRANT ALL PRIVILEGES ON TABLE ambari.hostcomponentdesiredstate TO :username;
CREATE TABLE ambari.hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index 52c0a20..a80e7ba 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -64,6 +64,7 @@
"params/run_smoke_test",
"HostRoles/nagios_alerts",
"HostRoles/stale_configs",
+ "HostRoles/admin_state",
"_"
],
"Configuration":[
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
index 4a15586..aabb406 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
@@ -153,16 +153,6 @@ mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
-#exclude file
-exlude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-if 'hdfs-exclude-file' in config['configurations']:
- if 'datanodes' in config['configurations']['hdfs-exclude-file']:
- hdfs_exclude_file = config['configurations']['hdfs-exclude-file']['datanodes'].split(",")
- else:
- hdfs_exclude_file = []
-else:
- hdfs_exclude_file = []
-
#hdfs ha properties
dfs_ha_enabled = False
dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
index 6243eca..7b406e1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
@@ -265,7 +265,6 @@ def setup_configs():
group=params.user_group
)
- # generate_exlude_file()
# generate_include_file()
def update_log4j_props(file):
@@ -293,16 +292,6 @@ def update_log4j_props(file):
"sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
-def generate_exlude_file():
- import params
-
- File(params.exlude_file_path,
- content=Template("exclude_hosts_list.j2"),
- owner=params.hdfs_user,
- group=params.user_group
- )
-
-
def generate_include_file():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
index aedb8e0..129d0ae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
@@ -33,6 +33,16 @@
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/hbase_master.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ </customCommands>
</component>
<component>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
index cd6f3a5..5f587d3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
@@ -32,6 +32,16 @@
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ </customCommands>
</component>
<component>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
index 9830b63..d8e191f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
@@ -52,6 +52,8 @@ def namenode(action=None, format=True):
principal=params.dfs_namenode_kerberos_principal
)
+ if action == "decommission":
+ decommission()
def create_name_dirs(directories):
import params
@@ -170,3 +172,21 @@ def format_namenode(force=None):
not_if=format("test -d {mark_dir}"),
path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+ import params
+
+ hdfs_user = params.hdfs_user
+ conf_dir = params.hadoop_conf_dir
+
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
+ owner=hdfs_user,
+ group=params.user_group
+ )
+
+ ExecuteHadoop('dfsadmin -refreshNodes',
+ user=hdfs_user,
+ conf_dir=conf_dir,
+ kinit_override=True)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
index 9b0fe43..80700c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
@@ -53,7 +53,14 @@ class NameNode(Script):
env.set_params(status_params)
check_process_status(status_params.namenode_pid_file)
+ pass
+
+ def decommission(self, env):
+ import params
+ env.set_params(params)
+ namenode(action="decommission")
+ pass
if __name__ == "__main__":
NameNode().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
index e93c50a..3e0e65b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
@@ -41,6 +41,10 @@ dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site'
dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
#hosts
hostname = config["hostname"]
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
index 1e44d2c..874209b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
@@ -33,6 +33,16 @@
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/jobtracker.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ </customCommands>
</component>
<component>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
index a48cc23..8f7f1d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
@@ -77,6 +77,28 @@ class Jobtracker(Script):
import status_params
env.set_params(status_params)
check_process_status(status_params.jobtracker_pid_file)
+ pass
+
+ def decommission(self, env):
+ import params
+
+ env.set_params(params)
+
+ mapred_user = params.mapred_user
+ conf_dir = params.conf_dir
+ user_group = params.user_group
+
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
+ owner=mapred_user,
+ group=user_group
+ )
+
+ ExecuteHadoop('mradmin -refreshNodes',
+ user=mapred_user,
+ conf_dir=conf_dir,
+ kinit_override=True)
+ pass
if __name__ == "__main__":
Jobtracker().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
index dd8569a..55f4303 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
@@ -46,4 +46,8 @@ hadoop_jar_location = "/usr/lib/hadoop/"
smokeuser = config['configurations']['global']['smokeuser']
security_enabled = config['configurations']['global']['security_enabled']
smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
\ No newline at end of file
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#exclude file
+mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
+exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..02fc5fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in mr_exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
index 0effa01..712a5ab 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
@@ -153,16 +153,6 @@ mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
-#exclude file
-exlude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-if 'hdfs-exclude-file' in config['configurations']:
- if 'datanodes' in config['configurations']['hdfs-exclude-file']:
- hdfs_exclude_file = config['configurations']['hdfs-exclude-file']['datanodes'].split(",")
- else:
- hdfs_exclude_file = []
-else:
- hdfs_exclude_file = []
-
#hdfs ha properties
dfs_ha_enabled = False
dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
index 9a1661a..f2644aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
@@ -270,7 +270,6 @@ def setup_configs():
group=params.user_group
)
- generate_exlude_file()
generate_include_file()
def update_log4j_props(file):
@@ -298,16 +297,6 @@ def update_log4j_props(file):
"sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
-def generate_exlude_file():
- import params
-
- File(params.exlude_file_path,
- content=Template("exclude_hosts_list.j2"),
- owner=params.hdfs_user,
- group=params.user_group
- )
-
-
def generate_include_file():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
index f6db6be..7227b6e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
@@ -33,20 +33,11 @@
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
- </component>
-
- <component>
- <name>HBASE_REGIONSERVER</name>
- <category>SLAVE</category>
- <commandScript>
- <script>scripts/hbase_regionserver.py</script>
- <scriptType>PYTHON</scriptType>
- </commandScript>
<customCommands>
<customCommand>
<name>DECOMMISSION</name>
<commandScript>
- <script>scripts/hbase_regionserver.py</script>
+ <script>scripts/hbase_master.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
@@ -55,6 +46,15 @@
</component>
<component>
+ <name>HBASE_REGIONSERVER</name>
+ <category>SLAVE</category>
+ <commandScript>
+ <script>scripts/hbase_regionserver.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </component>
+
+ <component>
<name>HBASE_CLIENT</name>
<category>CLIENT</category>
<commandScript>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
index 6cb2dfa..3de6ce5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
@@ -32,6 +32,16 @@
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ </customCommands>
</component>
<component>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
index ecda794..e26f758 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
@@ -34,6 +34,13 @@ def namenode(action=None, format=True):
if format:
format_namenode()
pass
+
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group
+ )
+
service(
action="start", name="namenode", user=params.hdfs_user,
keytab=params.dfs_namenode_keytab_file,
@@ -53,6 +60,8 @@ def namenode(action=None, format=True):
principal=params.dfs_namenode_kerberos_principal
)
+ if action == "decommission":
+ decommission()
def create_name_dirs(directories):
import params
@@ -178,3 +187,21 @@ def format_namenode(force=None):
not_if=format("test -d {mark_dir}"),
path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+ import params
+
+ hdfs_user = params.hdfs_user
+ conf_dir = params.hadoop_conf_dir
+
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group
+ )
+
+ ExecuteHadoop('dfsadmin -refreshNodes',
+ user=hdfs_user,
+ conf_dir=conf_dir,
+ kinit_override=True)
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
index 9a1dba36..deb01d5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
@@ -55,7 +55,14 @@ class NameNode(Script):
env.set_params(status_params)
check_process_status(status_params.namenode_pid_file)
+ pass
+
+ def decommission(self, env):
+ import params
+ env.set_params(params)
+ namenode(action="decommission")
+ pass
if __name__ == "__main__":
NameNode().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
index 568a8a8..a2479c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
@@ -41,6 +41,10 @@ dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site'
dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
#hosts
hostname = config["hostname"]
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
index 127d055..f25d80b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
@@ -33,6 +33,16 @@
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/resourcemanager.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ </customCommands>
</component>
<component>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
index b1ad27d..2253658 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
@@ -17,4 +17,9 @@
--
--- DDL
\ No newline at end of file
+-- DDL
+
+--Upgrade version to current
+UPDATE metainfo SET metainfo_value = '${ambariVersion}' WHERE metainfo_key = 'version';
+
+ALTER TABLE hostcomponentdesiredstate ADD admin_state VARCHAR(32);
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
index bcd958d..916a1e8 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
@@ -25,6 +25,9 @@ ALTER TABLE hostconfigmapping ADD (user_name VARCHAR2 (255) DEFAULT '_db');
ALTER TABLE stage ADD (cluster_host_info BLOB DEFAULT NULL);
+-- add decommission state
+ALTER TABLE hostcomponentdesiredstate ADD (admin_state VARCHAR2 (32) DEFAULT NULL);
+
-- DML
--Upgrade version to current
UPDATE metainfo SET "metainfo_key" = 'version', "metainfo_value" = '${ambariVersion}';
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
index a914a3f..bb02aba 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
@@ -137,6 +137,9 @@ CREATE TABLE ambari.action (action_name VARCHAR(255) NOT NULL, action_type VARCH
target_service VARCHAR(255), target_component VARCHAR(255), default_timeout SMALLINT NOT NULL, description VARCHAR(1000), target_type VARCHAR(32), PRIMARY KEY (action_name));
GRANT ALL PRIVILEGES ON TABLE ambari.action TO :username;
+-- add decommission state
+ALTER TABLE ambari.hostcomponentdesiredstate ADD COLUMN admin_state VARCHAR(32);
+
--Move cluster host info for old execution commands to stage table
UPDATE ambari.stage sd
SET