You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by rl...@apache.org on 2017/09/01 16:31:44 UTC

[1/3] ambari git commit: AMBARI-21809. Pre-configure services during stack upgrade if Kerberos is enabled to reduce number of core service restarts when services are added (rlevas)

Repository: ambari
Updated Branches:
  refs/heads/trunk e20c06400 -> 5230d9354


http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack_preconfigure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack_preconfigure.json b/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack_preconfigure.json
new file mode 100644
index 0000000..70fc3ed
--- /dev/null
+++ b/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack_preconfigure.json
@@ -0,0 +1,730 @@
+{
+  "identities": [
+    {
+      "keytab": {
+        "configuration": "cluster-env/smokeuser_keytab",
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        },
+        "owner": {
+          "access": "r",
+          "name": "${cluster-env/smokeuser}"
+        }
+      },
+      "name": "smokeuser",
+      "principal": {
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}",
+        "type": "user",
+        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}"
+      }
+    },
+    {
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        },
+        "owner": {
+          "access": "r",
+          "name": "root"
+        }
+      },
+      "name": "spnego",
+      "principal": {
+        "configuration": null,
+        "local_username": null,
+        "type": "service",
+        "value": "HTTP/_HOST@${realm}"
+      }
+    }
+  ],
+  "services": [
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "keytab": {
+                "file": "${keytab_dir}/ambari.server.keytab",
+                "group": {},
+                "owner": {
+                  "access": "r"
+                }
+              },
+              "name": "ambari-server",
+              "principal": {
+                "configuration": "cluster-env/ambari_principal_name",
+                "local_username": null,
+                "type": "user",
+                "value": "ambari-server${principal_suffix}@${realm}"
+              }
+            },
+            {
+              "name": "ambari-server_spnego",
+              "reference": "/spnego"
+            }
+          ],
+          "name": "AMBARI_SERVER"
+        }
+      ],
+      "name": "AMBARI"
+    },
+    {
+      "auth_to_local_properties": [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "components": [
+        {
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.datanode.address": "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.datanode.keytab.file",
+                "file": "${keytab_dir}/dn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "datanode_dn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "dn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "DATANODE"
+        },
+        {
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ],
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file",
+                "file": "${keytab_dir}/jn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "journalnode_jn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "jn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "JOURNALNODE"
+        },
+        {
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": "${keytab_dir}/nn.service.keytab"
+              },
+              "name": "/HDFS/NAMENODE/namenode_nn",
+              "principal": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": "nn/_HOST@${realm}"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hadoop-env/hdfs_user_keytab",
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "hdfs",
+              "principal": {
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "user",
+                "value": "${hadoop-env/hdfs_user}${principal_suffix}@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.namenode.keytab.file",
+                "file": "${keytab_dir}/nn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "namenode_nn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NAMENODE"
+        },
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hdfs-site/nfs.keytab.file",
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "nfsgateway",
+              "principal": {
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nfs/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NFS_GATEWAY"
+        },
+        {
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file",
+                "file": "${keytab_dir}/nn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "SECONDARY_NAMENODE"
+        }
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "ha.zookeeper.acl": "sasl:nn:rwcda",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}",
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+          }
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab",
+            "file": "${keytab_dir}/spnego.service.keytab"
+          },
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal",
+            "local_username": null,
+            "type": null,
+            "value": "HTTP/_HOST@${realm}"
+          }
+        }
+      ],
+      "name": "HDFS"
+    },
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.keytab",
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "yarn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "APP_TIMELINE_SERVER"
+        },
+        {
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file",
+                "file": null
+              },
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal",
+                "local_username": null,
+                "type": null,
+                "value": null
+              },
+              "when": {
+                "contains": [
+                  "services",
+                  "HIVE"
+                ]
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file",
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "group": {
+                  "access": "r",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "llap_zk_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal",
+                "local_username": null,
+                "type": "service",
+                "value": "hive/_HOST@${realm}"
+              },
+              "when": {
+                "contains": [
+                  "services",
+                  "HIVE"
+                ]
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.keytab",
+                "file": "${keytab_dir}/nm.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "nodemanager_nm",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "nm/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NODEMANAGER"
+        },
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": "${keytab_dir}/rm.service.keytab"
+              },
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": "rm/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.keytab",
+                "file": "${keytab_dir}/rm.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "resource_manager_rm",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "rm/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "RESOURCEMANAGER"
+        }
+      ],
+      "configurations": [
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+          }
+        },
+        {
+          "yarn-site": {
+            "hadoop.registry.client.auth": "kerberos",
+            "hadoop.registry.jaas.context": "Client",
+            "hadoop.registry.secure": "true",
+            "hadoop.registry.system.accounts": "sasl:${principals/YARN/APP_TIMELINE_SERVER/app_timeline_server_yarn|principalPrimary()},sasl:${principals/MAPREDUCE2/HISTORYSERVER/history_server_jhs|principalPrimary()},sasl:${principals/HDFS/NAMENODE/hdfs|principalPrimary()},sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()},sasl:${principals/HIVE/HIVE_SERVER/hive_server_hive|principalPrimary()}",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${activity-conf/global.activity.analyzer.user},dr.who,${yarn-env/yarn_user}",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.zk-acl": "sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()}:rwcda",
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.type": "kerberos"
+          }
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "/spnego"
+        }
+      ],
+      "name": "YARN"
+    },
+    {
+      "components": [
+        {
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "gateway-site": {
+                "gateway.hadoop.kerberos.secured": "true",
+                "java.security.krb5.conf": "/etc/krb5.conf"
+              }
+            },
+            {
+              "oozie-site": {
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "ranger-knox-audit": {
+                "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+                "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+                "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+                "xasecure.audit.jaas.Client.option.serviceName": "solr",
+                "xasecure.audit.jaas.Client.option.storeKey": "false",
+                "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+              }
+            },
+            {
+              "webhcat-site": {
+                "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": null
+              },
+              "name": "/KNOX/KNOX_GATEWAY/knox_principal",
+              "principal": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": null
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "knox-env/knox_keytab_path",
+                "file": "${keytab_dir}/knox.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${knox-env/knox_user}"
+                }
+              },
+              "name": "knox_principal",
+              "principal": {
+                "configuration": "knox-env/knox_principal_name",
+                "local_username": "${knox-env/knox_user}",
+                "type": "service",
+                "value": "${knox-env/knox_user}/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "KNOX_GATEWAY"
+        }
+      ],
+      "preconfigure": true,
+      "name": "KNOX"
+    },
+    {
+      "name": "BEACON",
+      "preconfigure": true,
+      "configurations": [
+      ],
+      "identities": [
+        {
+          "name": "beacon_server",
+          "principal": {
+            "value": "beacon/_HOST@${realm}",
+            "type": "service",
+            "local_username": "beacon"
+          }
+        }
+      ]
+    },
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "zookeeper-env/zookeeper_keytab_path",
+                "file": "${keytab_dir}/zk.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${zookeeper-env/zk_user}"
+                }
+              },
+              "name": "zookeeper_zk",
+              "principal": {
+                "configuration": "zookeeper-env/zookeeper_principal_name",
+                "local_username": null,
+                "type": "service",
+                "value": "zookeeper/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "ZOOKEEPER_SERVER"
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "name": "ZOOKEEPER"
+    }
+  ],
+  "properties": {
+    "additional_realms": "",
+    "keytab_dir": "/etc/security/keytabs",
+    "principal_suffix": "-${cluster_name|toLower()}",
+    "realm": "EXAMPLE.COM"
+  }
+}
\ No newline at end of file


[3/3] ambari git commit: AMBARI-21809. Pre-configure services during stack upgrade if Kerberos is enabled to reduce number of core service restarts when services are added (rlevas)

Posted by rl...@apache.org.
AMBARI-21809. Pre-configure services during stack upgrade if Kerberos is enabled to reduce number of core service restarts when services are added (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5230d935
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5230d935
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5230d935

Branch: refs/heads/trunk
Commit: 5230d93545c30ab06d49f9cbac25974b1462f914
Parents: e20c064
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Sep 1 12:31:23 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Sep 1 12:31:23 2017 -0400

----------------------------------------------------------------------
 .../controller/DeleteIdentityHandler.java       |   2 +-
 .../server/controller/KerberosHelper.java       |  65 +-
 .../server/controller/KerberosHelperImpl.java   | 142 ++--
 .../PrepareDisableKerberosServerAction.java     |  33 +-
 .../PrepareEnableKerberosServerAction.java      |   3 +-
 .../PrepareKerberosIdentitiesServerAction.java  |   5 +-
 .../upgrades/PreconfigureKerberosAction.java    | 573 +++++++++++++++
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |  12 +
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |  12 +
 .../PreconfigureKerberosActionTest.java         | 596 +++++++++++++++
 .../PreconfigureActionTest_cluster_config.json  | 110 +++
 ...ureActionTest_kerberos_descriptor_stack.json | 713 ++++++++++++++++++
 ..._kerberos_descriptor_stack_preconfigure.json | 730 +++++++++++++++++++
 13 files changed, 2920 insertions(+), 76 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
index 978b329..a7b9d80 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
@@ -246,7 +246,7 @@ class DeleteIdentityHandler {
      * The service configuration is needed because principal names may contain placeholder variables which are replaced based on the service configuration.
      */
     private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor, Set<String> serviceNames) throws AmbariException {
-      Map<String, Map<String, String>> actualConfig = getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor.getProperties());
+      Map<String, Map<String, String>> actualConfig = getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor, false, false);
       extendWithDeletedConfigOfService(actualConfig, serviceNames);
       return actualConfig;
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index 9bdb377..bb360b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -36,6 +36,7 @@ import org.apache.ambari.server.serveraction.kerberos.KerberosOperationException
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
@@ -510,6 +511,30 @@ public interface KerberosHelper {
       throws AmbariException;
 
   /**
+   * Gets the Kerberos descriptor for the requested stack.
+   * <p>
+   * One of the following Kerberos descriptors will be returned:
+   * <dl>
+   * <dt>{@link KerberosDescriptorType#STACK}</dt>
+   * <dd>A Kerberos descriptor built using data from the current stack definition, only</dd>
+   * <dt>{@link KerberosDescriptorType#USER}</dt>
+   * <dd>A Kerberos descriptor built using user-specified data stored as an artifact of the cluster, only</dd>
+   * <dt>{@link KerberosDescriptorType#COMPOSITE}</dt>
+   * <dd>A Kerberos descriptor built using data from the current stack definition with user-specified data stored as an artifact of the cluster applied
+   * - see {@link #getKerberosDescriptor(Cluster, boolean)}</dd>
+   * </dl>
+   *
+   * @param kerberosDescriptorType  the type of Kerberos descriptor to retrieve - see {@link KerberosDescriptorType}
+   * @param cluster                 the relevant Cluster
+   * @param stackId                 the relevant stack id, used for <code>COMPOSITE</code> or <code>STACK</code> Kerberos descriptor requests
+   * @param includePreconfigureData <code>true</code> to include the preconfigure data; <code>false</code> otherwise
+   * @return a Kerberos descriptor
+   * @throws AmbariException
+   */
+  KerberosDescriptor getKerberosDescriptor(KerberosDescriptorType kerberosDescriptorType, Cluster cluster, StackId stackId, boolean includePreconfigureData)
+      throws AmbariException;
+
+  /**
    * Merges configurations from a Map of configuration updates into a main configurations Map.
    * <p>
    * Each property in the updates Map is processed to replace variables using the replacement Map,
@@ -583,12 +608,16 @@ public interface KerberosHelper {
    *
    * @param cluster                      the relevant Cluster
    * @param hostname                     the relevant hostname
-   * @param kerberosDescriptorProperties a map of general Kerberos descriptor properties
+   * @param kerberosDescriptor the relevant Kerberos descriptor, used for variable replacement data
+   * @param includePreconfigureData <code>true</code> to include the preconfigure data; otherwise false
+   * @param calculateClusterHostInfo <code>true</code> to calculate and include cluster host info; <code>false</code> otherwise
    * @return a Map of calculated configuration types
    * @throws AmbariException
    */
   Map<String, Map<String, String>> calculateConfigurations(Cluster cluster, String hostname,
-                                                           Map<String, String> kerberosDescriptorProperties)
+                                                           KerberosDescriptor kerberosDescriptor,
+                                                           boolean includePreconfigureData,
+                                                           boolean calculateClusterHostInfo)
       throws AmbariException;
 
   /**
@@ -709,6 +738,38 @@ public interface KerberosHelper {
   PrincipalKeyCredential getKDCAdministratorCredentials(String clusterName) throws AmbariException;
 
   /**
+   * Translates a collection of configuration specifications (<code>config-type/property-name</code>)
+   * to a map of configuration types to a set of property names.
+   * <p>
+   * For example:
+   * <ul>
+   * <li>config-type1/property-name1</li>
+   * <li>config-type1/property-name2</li>
+   * <li>config-type2/property-name3</li>
+   * </ul>
+   * Becomes
+   * <ul>
+   * <li>
+   * config-type
+   * <ul>
+   * <li>property-name1</li>
+   * <li>property-name2</li>
+   * </ul>
+   * </li>
+   * <li>
+   * config-type2
+   * <ul>
+   * <li>property-name3</li>
+   * </ul>
+   * </li>
+   * </ul>
+   *
+   * @param configurationSpecifications a collection of configuration specifications (<code>config-type/property-name</code>)
+   * @return a map of configuration types to sets of property names
+   */
+  Map<String, Set<String>> translateConfigurationSpecifications(Collection<String> configurationSpecifications);
+
+  /**
    * Types of Kerberos descriptors related to where the data is stored.
    * <dl>
    * <dt>STACK</dt>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index d86433f..013a063 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -1328,15 +1328,20 @@ public class KerberosHelperImpl implements KerberosHelper {
                                                   boolean includePreconfigureData)
       throws AmbariException {
 
-    KerberosDescriptor stackDescriptor = (kerberosDescriptorType == KerberosDescriptorType.STACK || kerberosDescriptorType == KerberosDescriptorType.COMPOSITE)
-        ? getKerberosDescriptorFromStack(cluster, includePreconfigureData)
-        : null;
+    // !!! FIXME in a per-service view, what does this become?
+    Set<StackId> stackIds = new HashSet<>();
 
-    KerberosDescriptor userDescriptor = (kerberosDescriptorType == KerberosDescriptorType.USER || kerberosDescriptorType == KerberosDescriptorType.COMPOSITE)
-        ? getKerberosDescriptorUpdates(cluster)
-        : null;
+    for (Service service : cluster.getServices().values()) {
+      stackIds.add(service.getDesiredStackId());
+    }
 
-    KerberosDescriptor kerberosDescriptor = combineKerberosDescriptors(stackDescriptor, userDescriptor);
+    if (1 != stackIds.size()) {
+      throw new AmbariException("Services are deployed from multiple stacks and cannot determine a unique one.");
+    }
+
+    StackId stackId = stackIds.iterator().next();
+
+    KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(kerberosDescriptorType, cluster, stackId, includePreconfigureData);
 
     if (evaluateWhenClauses) {
       Set<String> services = new HashSet<>(cluster.getServices().keySet());
@@ -1348,7 +1353,7 @@ public class KerberosHelperImpl implements KerberosHelper {
       // Build the context needed to filter out Kerberos identities...
       // This includes the current set of configurations for the cluster and the set of installed services
       Map<String, Object> context = new HashMap<>();
-      context.put("configurations", calculateConfigurations(cluster, null, kerberosDescriptor.getProperties()));
+      context.put("configurations", calculateConfigurations(cluster, null, kerberosDescriptor, false, false));
       context.put("services", services);
 
       // Get the Kerberos identities that need to be pruned
@@ -1385,6 +1390,20 @@ public class KerberosHelperImpl implements KerberosHelper {
   }
 
   @Override
+  public KerberosDescriptor getKerberosDescriptor(KerberosDescriptorType kerberosDescriptorType, Cluster cluster,
+                                                  StackId stackId, boolean includePreconfigureData) throws AmbariException {
+    KerberosDescriptor stackDescriptor = (kerberosDescriptorType == KerberosDescriptorType.STACK || kerberosDescriptorType == KerberosDescriptorType.COMPOSITE)
+        ? getKerberosDescriptorFromStack(stackId, includePreconfigureData)
+        : null;
+
+    KerberosDescriptor userDescriptor = (kerberosDescriptorType == KerberosDescriptorType.USER || kerberosDescriptorType == KerberosDescriptorType.COMPOSITE)
+        ? getKerberosDescriptorUpdates(cluster)
+        : null;
+
+    return combineKerberosDescriptors(stackDescriptor, userDescriptor);
+  }
+
+  @Override
   public Map<String, Map<String, String>> mergeConfigurations(Map<String, Map<String, String>> configurations,
                                                               Map<String, KerberosConfigurationDescriptor> updates,
                                                               Map<String, Map<String, String>> replacements,
@@ -1503,18 +1522,20 @@ public class KerberosHelperImpl implements KerberosHelper {
             }
 
             // Append an entry to the action data file builder...
-            kerberosIdentityDataFileWriter.writeRecord(
-                hostname,
-                serviceName,
-                componentName,
-                principal,
-                principalType,
-                keytabFilePath,
-                keytabFileOwnerName,
-                keytabFileOwnerAccess,
-                keytabFileGroupName,
-                keytabFileGroupAccess,
-                (keytabIsCachable) ? "true" : "false");
+            if(kerberosIdentityDataFileWriter != null) {
+              kerberosIdentityDataFileWriter.writeRecord(
+                  hostname,
+                  serviceName,
+                  componentName,
+                  principal,
+                  principalType,
+                  keytabFilePath,
+                  keytabFileOwnerName,
+                  keytabFileOwnerAccess,
+                  keytabFileGroupName,
+                  keytabFileGroupAccess,
+                  (keytabIsCachable) ? "true" : "false");
+            }
 
             // Add the principal-related configuration to the map of configurations
             mergeConfiguration(kerberosConfigurations, principalConfiguration, principal, null);
@@ -1533,11 +1554,22 @@ public class KerberosHelperImpl implements KerberosHelper {
 
   @Override
   public Map<String, Map<String, String>> calculateConfigurations(Cluster cluster, String hostname,
-                                                                  Map<String, String> kerberosDescriptorProperties)
+                                                                  KerberosDescriptor kerberosDescriptor,
+                                                                  boolean includePreconfigureData,
+                                                                  boolean calculateClusterHostInfo)
       throws AmbariException {
-    return addAdditionalConfigurations(cluster,
+
+
+    Map<String, Map<String, String>> calculatedConfigurations = addAdditionalConfigurations(
+        cluster,
         calculateExistingConfigurations(cluster, hostname),
-        hostname, kerberosDescriptorProperties);
+        hostname,
+        (kerberosDescriptor == null) ? null : kerberosDescriptor.getProperties());
+
+    if (includePreconfigureData) {
+      calculatedConfigurations = addConfigurationsForPreProcessedServices(calculatedConfigurations, cluster, kerberosDescriptor, calculateClusterHostInfo);
+    }
+    return calculatedConfigurations;
   }
 
   private Map<String, String> principalNames(Cluster cluster, Map<String, Map<String, String>> configuration) throws AmbariException {
@@ -1600,8 +1632,6 @@ public class KerberosHelperImpl implements KerberosHelper {
         KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster, false);
 
         if (kerberosDescriptor != null) {
-          Map<String, String> kerberosDescriptorProperties = kerberosDescriptor.getProperties();
-
           Set<String> existingServices = cluster.getServices().keySet();
 
           for (String hostname : hosts) {
@@ -1609,7 +1639,9 @@ public class KerberosHelperImpl implements KerberosHelper {
             // variables within the Kerberos descriptor data
             Map<String, Map<String, String>> configurations = calculateConfigurations(cluster,
                 hostname.equals(ambariServerHostname) ? null : hostname,
-                kerberosDescriptorProperties);
+                kerberosDescriptor,
+                false,
+                false);
 
             // Create the context to use for filtering Kerberos Identities based on the state of the cluster
             Map<String, Object> filterContext = new HashMap<>();
@@ -1755,6 +1787,37 @@ public class KerberosHelperImpl implements KerberosHelper {
     }
   }
 
+  @Override
+  public Map<String, Set<String>> translateConfigurationSpecifications(Collection<String> configurationSpecifications) {
+    Map<String, Set<String>> translation = null;
+
+    if (configurationSpecifications != null) {
+      translation = new HashMap<>();
+
+      for (String configurationSpecification : configurationSpecifications) {
+        Matcher m = KerberosDescriptor.AUTH_TO_LOCAL_PROPERTY_SPECIFICATION_PATTERN.matcher(configurationSpecification);
+
+        if (m.matches()) {
+          String configType = m.group(1);
+          String propertyName = m.group(2);
+
+          if (configType == null) {
+            configType = "";
+          }
+
+          Set<String> propertyNames = translation.get(configType);
+          if(propertyNames == null) {
+            propertyNames = new HashSet<>();
+            translation.put(configType, propertyNames);
+          }
+          propertyNames.add(propertyName);
+        }
+      }
+    }
+
+    return translation;
+  }
+
   /**
    * Creates the principal and cached keytab file for the specified identity, if it is determined to
    * be of the expected type - user (headless) or service.
@@ -2072,8 +2135,6 @@ public class KerberosHelperImpl implements KerberosHelper {
       KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster, false);
       KerberosIdentityDataFileWriter kerberosIdentityDataFileWriter = null;
 
-      Map<String, String> kerberosDescriptorProperties = kerberosDescriptor.getProperties();
-
       // This is needed to help determine which hosts to perform actions for and create tasks for.
       Set<String> hostsWithValidKerberosClient = getHostsWithValidKerberosClient(cluster);
 
@@ -2088,7 +2149,7 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       // Calculate the current non-host-specific configurations. These will be used to replace
       // variables within the Kerberos descriptor data
-      Map<String, Map<String, String>> configurations = calculateConfigurations(cluster, null, kerberosDescriptorProperties);
+      Map<String, Map<String, String>> configurations = calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
 
       String principal = variableReplacementHelper.replaceVariables("${kerberos-env/service_check_principal_name}@${realm}", configurations);
       String principalType = "user";
@@ -2839,31 +2900,14 @@ public class KerberosHelperImpl implements KerberosHelper {
   }
 
   /**
-   * Get the default Kerberos descriptor from the stack, which is the same as the value from
-   * <code>stacks/:stackName/versions/:version/artifacts/kerberos_descriptor</code>
+   * Get the default Kerberos descriptor from the specified stack.
    *
-   * @param cluster                 the cluster
+   * @param stackId                 the relevant stack ID
    * @param includePreconfigureData <code>true</code> to include the preconfigure data; otherwise false
    * @return a Kerberos Descriptor
    * @throws AmbariException if an error occurs while retrieving the Kerberos descriptor
    */
-  private KerberosDescriptor getKerberosDescriptorFromStack(Cluster cluster, boolean includePreconfigureData) throws AmbariException {
-    // !!! FIXME in a per-service view, what does this become?
-    Set<StackId> stackIds = new HashSet<>();
-
-    for (Service service : cluster.getServices().values()) {
-      stackIds.add(service.getDesiredStackId());
-    }
-
-    if (1 != stackIds.size()) {
-      throw new AmbariException("Services are deployed from multiple stacks and cannot determine a unique one.");
-    }
-
-    StackId stackId = stackIds.iterator().next();
-
-    // -------------------------------
-    // Get the default Kerberos descriptor from the stack, which is the same as the value from
-    // stacks/:stackName/versions/:version/artifacts/kerberos_descriptor
+  private KerberosDescriptor getKerberosDescriptorFromStack(StackId stackId, boolean includePreconfigureData) throws AmbariException {
     return ambariMetaInfo.getKerberosDescriptor(stackId.getStackName(), stackId.getStackVersion(), includePreconfigureData);
     // -------------------------------
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
index 178d129..4e63f4a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@ -38,6 +37,7 @@ import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.commons.collections.CollectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -99,36 +99,31 @@ public class PrepareDisableKerberosServerAction extends AbstractPrepareKerberosS
       actionLog.writeStdOut(String.format("Processing %d components", schCount));
     }
 
-    Map<String, String> kerberosDescriptorProperties = kerberosDescriptor.getProperties();
     Set<String> services = cluster.getServices().keySet();
     boolean includeAmbariIdentity = "true".equalsIgnoreCase(getCommandParameterValue(commandParameters, KerberosServerAction.INCLUDE_AMBARI_IDENTITY));
     Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
 
     // Calculate the current host-specific configurations. These will be used to replace
     // variables within the Kerberos descriptor data
-    Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptorProperties);
+    Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
 
     processServiceComponentHosts(cluster, kerberosDescriptor, schToProcess, identityFilter, dataDirectory,
         configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore);
 
     // Add auth-to-local configurations to the set of changes
-    Set<String> authToLocalProperties = kerberosDescriptor.getAllAuthToLocalProperties();
+    Map<String, Set<String>> authToLocalProperties = kerberosHelper.translateConfigurationSpecifications(kerberosDescriptor.getAllAuthToLocalProperties());
     if (authToLocalProperties != null) {
-      for (String authToLocalProperty : authToLocalProperties) {
-        Matcher m = KerberosDescriptor.AUTH_TO_LOCAL_PROPERTY_SPECIFICATION_PATTERN.matcher(authToLocalProperty);
-
-        if (m.matches()) {
-          String configType = m.group(1);
-          String propertyName = m.group(2);
-
-          if (configType == null) {
-            configType = "";
-          }
-
-          // Add existing auth_to_local configuration, if set
-          Map<String, String> configuration = kerberosConfigurations.get(configType);
-          if (configuration != null) {
-            configuration.put(propertyName, "DEFAULT");
+      for (Map.Entry<String, Set<String>> entry : authToLocalProperties.entrySet()) {
+        String configType = entry.getKey();
+        Set<String> propertyNames = entry.getValue();
+
+        if (!CollectionUtils.isEmpty(propertyNames)) {
+          for (String propertyName : propertyNames) {
+            // Add existing auth_to_local configuration, if set
+            Map<String, String> configuration = kerberosConfigurations.get(configType);
+            if (configuration != null) {
+              configuration.put(propertyName, "DEFAULT");
+            }
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
index da83a74..e13f033 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
@@ -108,14 +108,13 @@ public class PrepareEnableKerberosServerAction extends PrepareKerberosIdentities
     }
 
     KerberosHelper kerberosHelper = getKerberosHelper();
-    Map<String, String> kerberosDescriptorProperties = kerberosDescriptor.getProperties();
     Map<String, Set<String>> propertiesToRemove = new HashMap<>();
     Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
     Set<String> services = cluster.getServices().keySet();
 
     // Calculate the current host-specific configurations. These will be used to replace
     // variables within the Kerberos descriptor data
-    Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptorProperties);
+    Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
 
     processServiceComponentHosts(cluster, kerberosDescriptor, schToProcess, identityFilter, dataDirectory,
         configurations, kerberosConfigurations, true, propertiesToIgnore);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
index 581067f..00c82a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
@@ -84,7 +84,6 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
     }
 
     KerberosHelper kerberosHelper = getKerberosHelper();
-    Map<String, String> kerberosDescriptorProperties = kerberosDescriptor.getProperties();
     Set<String> services = cluster.getServices().keySet();
     Map<String, Set<String>> propertiesToRemove = new HashMap<>();
     Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
@@ -92,7 +91,7 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
 
     // Calculate the current host-specific configurations. These will be used to replace
     // variables within the Kerberos descriptor data
-    Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptorProperties);
+    Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
 
     processServiceComponentHosts(cluster, kerberosDescriptor, schToProcess, identityFilter, dataDirectory,
         configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore);
@@ -101,7 +100,7 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
         propertiesToIgnore, propertiesToRemove, true);
 
     if ("true".equalsIgnoreCase(getCommandParameterValue(commandParameters, UPDATE_CONFIGURATIONS))) {
-      Map<String, Map<String, String>> calculatedConfigurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor.getProperties());
+      Map<String, Map<String, String>> calculatedConfigurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
       processAuthToLocalRules(cluster, calculatedConfigurations, kerberosDescriptor, schToProcess, kerberosConfigurations, getDefaultRealm(commandParameters), false);
       processConfigurationChanges(dataDirectory, kerberosConfigurations, propertiesToRemove);
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java
new file mode 100644
index 0000000..697f1d1
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java
@@ -0,0 +1,573 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.serveraction.upgrades;
+
+import static org.apache.ambari.server.controller.KerberosHelper.DEFAULT_REALM;
+import static org.apache.ambari.server.controller.KerberosHelper.KERBEROS_ENV;
+import static org.apache.ambari.server.controller.KerberosHelper.PRECONFIGURE_SERVICES;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.serveraction.kerberos.PreconfigureServiceType;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptorContainer;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.inject.Inject;
+
+/**
+ * PreconfigureKerberos updates existing service configurations with properties from service-level
+ * Kerberos descriptors that are flagged for pre-configuring. This is done during stack upgrades in
+ * order to reduce service restarts when the flagged services are later installed.
+ */
+public class PreconfigureKerberosAction extends AbstractUpgradeServerAction {
+  static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
+
+  @Inject
+  private AmbariManagementController ambariManagementController;
+
+  @Inject
+  private KerberosHelper kerberosHelper;
+
+  @Inject
+  private ConfigHelper configHelper;
+
+  @Inject
+  private VariableReplacementHelper variableReplacementHelper;
+
+  @Override
+  public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
+    Map<String, String> commandParameters = getCommandParameters();
+    if (null == commandParameters || commandParameters.isEmpty()) {
+      return createCommandReport(0, HostRoleStatus.FAILED, "{}", "",
+          "Unable to change configuration values without command parameters");
+    }
+
+    if (!isDowngrade()) {
+      String clusterName = commandParameters.get("clusterName");
+      Cluster cluster = m_clusters.getCluster(clusterName);
+
+      if (cluster.getSecurityType() == SecurityType.KERBEROS) {
+        StackId stackId;
+
+        try {
+          stackId = getTargetStackId(cluster);
+        } catch (AmbariException e) {
+          return createCommandReport(0, HostRoleStatus.FAILED, "{}", "", e.getLocalizedMessage());
+        }
+
+        if (stackId == null) {
+          return createCommandReport(0, HostRoleStatus.FAILED, "{}", "",
+              "The target stack Id was not specified.");
+        }
+
+        KerberosDescriptor kerberosDescriptor = kerberosHelper.getKerberosDescriptor(KerberosHelper.KerberosDescriptorType.COMPOSITE, cluster, stackId, true);
+
+        // Calculate the current host-specific configurations. These will be used to replace
+        // variables within the Kerberos descriptor data
+        Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, true, false);
+
+        PreconfigureServiceType preconfigureServiceType = getPreconfigureServiceType(configurations);
+
+        if (preconfigureServiceType != PreconfigureServiceType.NONE) {
+          Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
+          Map<String, Set<String>> propertiesToRemove = new HashMap<>();
+          Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
+
+          if (preconfigureServiceType == PreconfigureServiceType.ALL) {
+            // Force all services to be flagged for pre-configuration...
+            Map<String, KerberosServiceDescriptor> serviceDescriptors = kerberosDescriptor.getServices();
+            if (serviceDescriptors != null) {
+              for (KerberosServiceDescriptor serviceDescriptor : serviceDescriptors.values()) {
+                serviceDescriptor.setPreconfigure(true);
+              }
+            }
+          }
+
+          processServiceComponentHosts(cluster, kerberosDescriptor, configurations, kerberosConfigurations, propertiesToIgnore);
+
+          // Calculate the set of configurations to update and replace any variables
+          // using the previously calculated Map of configurations for the host.
+          kerberosConfigurations = kerberosHelper.processPreconfiguredServiceConfigurations(kerberosConfigurations, configurations, cluster, kerberosDescriptor);
+
+          Map<String, Set<String>> installedServices = calculateInstalledServices(cluster);
+
+          kerberosHelper.applyStackAdvisorUpdates(cluster, installedServices.keySet(), configurations, kerberosConfigurations,
+              propertiesToIgnore, propertiesToRemove, true);
+
+          kerberosHelper.setAuthToLocalRules(cluster, kerberosDescriptor, getDefaultRealm(configurations), installedServices,
+              configurations, kerberosConfigurations, true);
+
+          processConfigurationChanges(cluster, stackId, kerberosDescriptor, kerberosConfigurations, propertiesToRemove, configurations);
+        } else {
+          actionLog.writeStdOut("Skipping: This facility is only available when kerberos-env/preconfigure_services is not \"NONE\"");
+        }
+      } else {
+        actionLog.writeStdOut("Skipping: This facility is only available when Kerberos is enabled");
+      }
+    } else {
+      actionLog.writeStdOut("Skipping: This facility is only available during an upgrade");
+    }
+
+    return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
+  }
+
+  /**
+   * Given a Cluster object creates a map of service names to sets of the installed components for that
+   * service.
+   *
+   * @param cluster the cluster
+   * @return a map of (installed) service names to the relevant set of (installed) component names
+   */
+  private Map<String, Set<String>> calculateInstalledServices(Cluster cluster) {
+    Map<String, Set<String>> installedServices = new HashMap<>();
+    Map<String, Service> services = cluster.getServices();
+
+    for (Service service : services.values()) {
+      installedServices.put(service.getName(), service.getServiceComponents().keySet());
+    }
+
+    return installedServices;
+  }
+
+  /**
+   * Safely retrieves the specified property from the specified configuration type from a map of
+   * configurations.
+   *
+   * @param configurations the existing configurations for the cluster
+   * @return the requested value or null if the configuration does not exist
+   */
+  private String getValueFromConfiguration(Map<String, Map<String, String>> configurations, String configType, String propertyName) {
+    String value = null;
+
+    if (configurations != null) {
+      Map<String, String> kerberosEnv = configurations.get(configType);
+
+      if (kerberosEnv != null) {
+        value = kerberosEnv.get(propertyName);
+      }
+    }
+
+    return value;
+  }
+
+  /**
+   * Safely retrieves the <code>realm</code> property of the <code>kerberos-env</code> configuration.
+   *
+   * @param configurations the existing configurations for the cluster
+   * @return the requested value or null if the configuration does not exist
+   * @see #getValueFromConfiguration(Map, String, String)
+   */
+  private String getDefaultRealm(Map<String, Map<String, String>> configurations) {
+    return getValueFromConfiguration(configurations, KERBEROS_ENV, DEFAULT_REALM);
+  }
+
+  /**
+   * Safely retrieves the <code>preconfigure_services</code> property of the <code>kerberos-env</code> configuration.
+   *
+   * @param configurations the existing configurations for the cluster
+   * @return the requested value or null if the configuration does not exist
+   * @see #getValueFromConfiguration(Map, String, String)
+   */
+  private PreconfigureServiceType getPreconfigureServiceType(Map<String, Map<String, String>> configurations) {
+    String preconfigureServices = getValueFromConfiguration(configurations, KERBEROS_ENV, PRECONFIGURE_SERVICES);
+
+    PreconfigureServiceType preconfigureServiceType = null;
+    if (!StringUtils.isEmpty(preconfigureServices)) {
+      try {
+        preconfigureServiceType = PreconfigureServiceType.valueOf(preconfigureServices.toUpperCase());
+      } catch (Throwable t) {
+        preconfigureServiceType = PreconfigureServiceType.DEFAULT;
+      }
+    }
+
+    return (preconfigureServiceType == null) ? PreconfigureServiceType.DEFAULT : preconfigureServiceType;
+  }
+
+  /**
+   * Determines if upgrade direction is {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
+   *
+   * @return {@code true} if {@link Direction#DOWNGRADE}; {@code false} if {@link Direction#UPGRADE}
+   */
+  private boolean isDowngrade() {
+    return Direction.DOWNGRADE.name().equalsIgnoreCase(getCommandParameterValue(UPGRADE_DIRECTION_KEY));
+  }
+
+  /**
+   * Retrieves the target stack ID for the stack upgrade or downgrade operation.
+   *
+   * @param cluster the cluster
+   * @return the target {@link StackId}
+   * @throws AmbariException if multiple stack id's are detected
+   */
+  private StackId getTargetStackId(Cluster cluster) throws AmbariException {
+    UpgradeContext upgradeContext = getUpgradeContext(cluster);
+
+    // !!! FIXME in a per-service view, what does this become?
+    Set<StackId> stackIds = new HashSet<>();
+
+    for (Service service : cluster.getServices().values()) {
+      RepositoryVersionEntity targetRepoVersion = upgradeContext.getTargetRepositoryVersion(service.getName());
+      StackId targetStackId = targetRepoVersion.getStackId();
+      stackIds.add(targetStackId);
+    }
+
+    if (1 != stackIds.size()) {
+      throw new AmbariException("Services are deployed from multiple stacks and cannot determine a unique one.");
+    }
+
+    return stackIds.iterator().next();
+  }
+
+  /**
+   * Find and iterate through the {@link ServiceComponentHost} objects for the current {@link Cluster}
+   * to calculate property updates and auth-to-local rules.
+   *
+   * @param cluster                the cluster
+   * @param kerberosDescriptor     the Kerberos descriptor
+   * @param currentConfigurations  the current configurations for the cluster
+   * @param kerberosConfigurations the (Kerberos-specific) configuration updates
+   * @param propertiesToBeIgnored  a map to store properties that should be ignored by operations that update property values
+   * @throws AmbariException if an issue occurs
+   */
+  private void processServiceComponentHosts(Cluster cluster, KerberosDescriptor kerberosDescriptor,
+                                            Map<String, Map<String, String>> currentConfigurations,
+                                            Map<String, Map<String, String>> kerberosConfigurations,
+                                            Map<String, Set<String>> propertiesToBeIgnored)
+      throws AmbariException {
+
+    Collection<Host> hosts = cluster.getHosts();
+    if (!hosts.isEmpty()) {
+      // Create the context to use for filtering Kerberos Identities based on the state of the cluster
+      Map<String, Object> filterContext = new HashMap<>();
+      filterContext.put("configurations", currentConfigurations);
+      filterContext.put("services", cluster.getServices().keySet());
+
+      try {
+        Map<String, Set<String>> propertiesToIgnore = null;
+
+        for (Host host : hosts) {
+          // Iterate over the components installed on the current host to get the service and
+          // component-level Kerberos descriptors in order to determine which principals,
+          // keytab files, and configurations need to be created or updated.
+          for (ServiceComponentHost sch : cluster.getServiceComponentHosts(host.getHostName())) {
+            String hostName = sch.getHostName();
+
+            String serviceName = sch.getServiceName();
+            String componentName = sch.getServiceComponentName();
+
+            KerberosServiceDescriptor serviceDescriptor = kerberosDescriptor.getService(serviceName);
+
+            if (serviceDescriptor != null) {
+              List<KerberosIdentityDescriptor> serviceIdentities = serviceDescriptor.getIdentities(true, filterContext);
+
+              // Add service-level principals (and keytabs)
+              kerberosHelper.addIdentities(null, serviceIdentities,
+                  null, hostName, serviceName, componentName, kerberosConfigurations, currentConfigurations);
+              propertiesToIgnore = gatherPropertiesToIgnore(serviceIdentities, propertiesToIgnore);
+
+              KerberosComponentDescriptor componentDescriptor = serviceDescriptor.getComponent(componentName);
+
+              if (componentDescriptor != null) {
+                List<KerberosIdentityDescriptor> componentIdentities = componentDescriptor.getIdentities(true, filterContext);
+
+                // Calculate the set of configurations to update and replace any variables
+                // using the previously calculated Map of configurations for the host.
+                kerberosHelper.mergeConfigurations(kerberosConfigurations,
+                    componentDescriptor.getConfigurations(true), currentConfigurations, null);
+
+                // Add component-level principals (and keytabs)
+                kerberosHelper.addIdentities(null, componentIdentities,
+                    null, hostName, serviceName, componentName, kerberosConfigurations, currentConfigurations);
+                propertiesToIgnore = gatherPropertiesToIgnore(componentIdentities, propertiesToIgnore);
+              }
+            }
+          }
+        }
+
+        // Add ambari-server identities only if 'kerberos-env.create_ambari_principal = true'
+        if (kerberosHelper.createAmbariIdentities(currentConfigurations.get(KERBEROS_ENV))) {
+          List<KerberosIdentityDescriptor> ambariIdentities = kerberosHelper.getAmbariServerIdentities(kerberosDescriptor);
+
+          for (KerberosIdentityDescriptor identity : ambariIdentities) {
+            // If the identity represents the ambari-server user, use the component name "AMBARI_SERVER_SELF"
+            // so it can be distinguished between other identities related to the AMBARI-SERVER
+            // component.
+            String componentName = KerberosHelper.AMBARI_SERVER_KERBEROS_IDENTITY_NAME.equals(identity.getName())
+                ? "AMBARI_SERVER_SELF"
+                : "AMBARI_SERVER";
+
+            List<KerberosIdentityDescriptor> componentIdentities = Collections.singletonList(identity);
+            kerberosHelper.addIdentities(null, componentIdentities,
+                null, KerberosHelper.AMBARI_SERVER_HOST_NAME, "AMBARI", componentName, kerberosConfigurations, currentConfigurations);
+            propertiesToIgnore = gatherPropertiesToIgnore(componentIdentities, propertiesToIgnore);
+          }
+        }
+
+        if ((propertiesToBeIgnored != null) && (propertiesToIgnore != null)) {
+          propertiesToBeIgnored.putAll(propertiesToIgnore);
+        }
+      } catch (IOException e) {
+        throw new AmbariException(e.getMessage(), e);
+      }
+    }
+  }
+
+  private Map<String, Set<String>> gatherPropertiesToIgnore(List<KerberosIdentityDescriptor> identities,
+                                                            Map<String, Set<String>> propertiesToIgnore) {
+    Map<String, Map<String, String>> identityConfigurations = kerberosHelper.getIdentityConfigurations(identities);
+    if (!MapUtils.isEmpty(identityConfigurations)) {
+      if (propertiesToIgnore == null) {
+        propertiesToIgnore = new HashMap<>();
+      }
+
+      for (Map.Entry<String, Map<String, String>> entry : identityConfigurations.entrySet()) {
+        String configType = entry.getKey();
+        Map<String, String> properties = entry.getValue();
+
+        if (MapUtils.isEmpty(properties)) {
+          Set<String> propertyNames = propertiesToIgnore.get(configType);
+          if (propertyNames == null) {
+            propertyNames = new HashSet<>();
+            propertiesToIgnore.put(configType, propertyNames);
+          }
+          propertyNames.addAll(properties.keySet());
+        }
+      }
+    }
+
+    return propertiesToIgnore;
+  }
+
+  /**
+   * Processes configuration changes to determine if any work needs to be done.
+   * <p/>
+   * If work is to be done, a data file containing the details is created so that the changes may be
+   * processed in the appropriate stage.
+   *
+   * @param cluster                the cluster
+   * @param targetStackId          the target stack id
+   * @param kerberosConfigurations the Kerberos-specific configuration map
+   * @param propertiesToBeRemoved  a map of properties to be removed from the current configuration,
+   *                               grouped by configuration type.
+   * @param variableReplaments     replacement values to use when attempting to perform variable replacements on the property names
+   * @throws AmbariException if an issue is encountered
+   */
+  private void processConfigurationChanges(Cluster cluster, StackId targetStackId,
+                                           KerberosDescriptor kerberosDescriptor,
+                                           Map<String, Map<String, String>> kerberosConfigurations,
+                                           Map<String, Set<String>> propertiesToBeRemoved,
+                                           Map<String, Map<String, String>> variableReplaments)
+      throws AmbariException {
+    actionLog.writeStdOut("Determining configuration changes");
+
+    if (!kerberosConfigurations.isEmpty()) {
+      Map<String, Service> installedServices = cluster.getServices();
+
+      // Build a map of configuration types to properties that indicate which properties should be altered
+      // This map should contain only properties defined in the service-level Kerberos descriptors of
+      // services that have been flagged to be preconfigured and that are not yet installed.
+      Map<String, Set<String>> propertyFilter = new HashMap<>();
+      Map<String, KerberosServiceDescriptor> serviceDescriptors = kerberosDescriptor.getServices();
+      if (serviceDescriptors != null) {
+        for (KerberosServiceDescriptor serviceDescriptor : serviceDescriptors.values()) {
+          if (!installedServices.containsKey(serviceDescriptor.getName()) && serviceDescriptor.shouldPreconfigure()) {
+            buildFilter(Collections.singleton(serviceDescriptor), propertyFilter, variableReplaments);
+          }
+        }
+      }
+
+      // Add the auth-to-local rule configuration specifications to the filter
+      Map<String, Set<String>> authToLocalProperties = kerberosHelper.translateConfigurationSpecifications(kerberosDescriptor.getAllAuthToLocalProperties());
+      if (!MapUtils.isEmpty(authToLocalProperties)) {
+        for (Map.Entry<String, Set<String>> entry : authToLocalProperties.entrySet()) {
+          Set<String> properties = entry.getValue();
+
+          if (!CollectionUtils.isEmpty(properties)) {
+            String configurationType = entry.getKey();
+
+            Set<String> propertyNames = propertyFilter.get(configurationType);
+            if (propertyNames == null) {
+              propertyNames = new HashSet<>();
+              propertyFilter.put(configurationType, propertyNames);
+            }
+
+            propertyNames.addAll(properties);
+          }
+        }
+      }
+
+      Set<String> visitedTypes = new HashSet<>();
+
+      for (Map.Entry<String, Map<String, String>> entry : kerberosConfigurations.entrySet()) {
+        String configType = entry.getKey();
+
+        String service = cluster.getServiceByConfigType(configType);
+        Set<String> allowedProperties = propertyFilter.get(configType);
+
+        // Update properties for services that are installed and not filtered out
+        if (installedServices.containsKey(service) && !CollectionUtils.isEmpty(allowedProperties)) {
+          Map<String, String> propertiesToUpdate = entry.getValue();
+          Set<String> propertiesToRemove = (propertiesToBeRemoved == null) ? null : propertiesToBeRemoved.get(configType);
+
+          // Filter the properties to update
+          if (propertiesToUpdate != null) {
+            Iterator<Map.Entry<String, String>> mapIterator = propertiesToUpdate.entrySet().iterator();
+            while (mapIterator.hasNext()) {
+              Map.Entry<String, String> mapEntry = mapIterator.next();
+
+              if (!allowedProperties.contains(mapEntry.getKey())) {
+                mapIterator.remove();
+              }
+            }
+          }
+
+          // Filter the properties to remove
+          if (propertiesToRemove != null) {
+            Iterator<String> setIterator = propertiesToRemove.iterator();
+            while (setIterator.hasNext()) {
+              String setEntry = setIterator.next();
+              if (!allowedProperties.contains(setEntry)) {
+                setIterator.remove();
+              }
+            }
+          }
+
+          visitedTypes.add(configType);
+
+          if (!MapUtils.isEmpty(propertiesToUpdate) || !CollectionUtils.isEmpty(propertiesToRemove)) {
+            if (!MapUtils.isEmpty(propertiesToUpdate)) {
+              for (Map.Entry<String, String> property : propertiesToUpdate.entrySet()) {
+                actionLog.writeStdOut(String.format("Setting: %s/%s = %s", configType, property.getKey(), property.getValue()));
+              }
+            }
+
+            if (!CollectionUtils.isEmpty(propertiesToRemove)) {
+              for (String property : propertiesToRemove) {
+                actionLog.writeStdOut(String.format("Removing: %s/%s", configType, property));
+              }
+            }
+
+            configHelper.updateConfigType(cluster, targetStackId,
+                ambariManagementController, configType, propertiesToUpdate, propertiesToRemove,
+                ambariManagementController.getAuthName(), "Preconfiguring for Kerberos during upgrade");
+          }
+        }
+      }
+
+      if (!MapUtils.isEmpty(propertiesToBeRemoved)) {
+        for (Map.Entry<String, Set<String>> entry : propertiesToBeRemoved.entrySet()) {
+          String configType = entry.getKey();
+
+          if (!visitedTypes.contains(configType)) {
+            Set<String> propertiesToRemove = entry.getValue();
+
+            if (!CollectionUtils.isEmpty(propertiesToRemove)) {
+              for (String property : propertiesToRemove) {
+                actionLog.writeStdOut(String.format("Removing: %s/%s", configType, property));
+              }
+
+              configHelper.updateConfigType(cluster, targetStackId,
+                  ambariManagementController, configType, null, entry.getValue(),
+                  ambariManagementController.getAuthName(), "Preconfiguring for Kerberos during upgrade");
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Adds entries to the property filter (<code>propertyFilter</code>) found in the {@link KerberosConfigurationDescriptor}s
+   * within the specified node of the Kerberos descriptor.
+   *
+   * @param containers     the Kerberos descriptor containers to process
+   * @param propertyFilter the property filter map to update
+   * @param replacements   replacement values to use when attempting to perform variable replacements on the property names
+   * @throws AmbariException if an issue occurs while replacing variables in the property names
+   */
+  private void buildFilter(Collection<? extends AbstractKerberosDescriptorContainer> containers,
+                           Map<String, Set<String>> propertyFilter,
+                           Map<String, Map<String, String>> replacements)
+      throws AmbariException {
+    if (containers != null) {
+      for (AbstractKerberosDescriptorContainer container : containers) {
+        Map<String, KerberosConfigurationDescriptor> configurationDescriptors = container.getConfigurations(false);
+
+        if (!MapUtils.isEmpty(configurationDescriptors)) {
+          for (KerberosConfigurationDescriptor configurationDescriptor : configurationDescriptors.values()) {
+            Map<String, String> properties = configurationDescriptor.getProperties();
+
+            if (!MapUtils.isEmpty(properties)) {
+              String configType = configurationDescriptor.getType();
+
+              Set<String> propertyNames = propertyFilter.get(configType);
+              if (propertyNames == null) {
+                propertyNames = new HashSet<>();
+                propertyFilter.put(configType, propertyNames);
+              }
+
+              // Replace variables in the property name. For example ${knox-env/knox_user}.
+              for (String propertyName : properties.keySet()) {
+                propertyNames.add(variableReplacementHelper.replaceVariables(propertyName, replacements));
+              }
+            }
+          }
+        }
+
+        Collection<? extends AbstractKerberosDescriptorContainer> childContainers = container.getChildContainers();
+        if (childContainers != null) {
+          buildFilter(childContainers, propertyFilter, replacements);
+        }
+      }
+    }
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 492c308..90a7d97 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -669,6 +669,18 @@
     </group>
 
     <!--
+    After processing this group, items declared to be preconfigured will be applied to the existing
+    configurations.
+    -->
+    <group xsi:type="cluster" name="PRECONFIGURE_COMMON_KERBEROS_PROPERTIES" title="Preconfigure Kerberos-related properties">
+      <condition xsi:type="security" type="kerberos"/>
+      <direction>UPGRADE</direction>
+      <execute-stage title="Preconfigure Kerberos-related properties">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.PreconfigureKerberosAction"/>
+      </execute-stage>
+    </group>
+
+    <!--
     Invoke "hdp-select set all" to change any components we may have missed
     that are installed on the hosts but not known by Ambari.
     -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 995a1d3..5ee82c3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -148,6 +148,18 @@
       </execute-stage>
     </group>
 
+    <!--
+    After processing this group, items declared to be preconfigured will be applied to the existing
+    configurations.
+    -->
+    <group xsi:type="cluster" name="PRECONFIGURE_COMMON_KERBEROS_PROPERTIES" title="Preconfigure Kerberos-related properties">
+      <condition xsi:type="security" type="kerberos"/>
+      <direction>UPGRADE</direction>
+      <execute-stage title="Preconfigure Kerberos-related properties">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.PreconfigureKerberosAction"/>
+      </execute-stage>
+    </group>
+
     <group name="CORE_MASTER" title="Core Masters">
       <service-check>false</service-check>
       <service name="HDFS">


[2/3] ambari git commit: AMBARI-21809. Pre-configure services during stack upgrade if Kerberos is enabled to reduce number of core service restarts when services are added (rlevas)

Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java
new file mode 100644
index 0000000..a7bf33c
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosActionTest.java
@@ -0,0 +1,596 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.serveraction.upgrades;
+
+import static org.apache.ambari.server.serveraction.upgrades.PreconfigureKerberosAction.UPGRADE_DIRECTION_KEY;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.getCurrentArguments;
+import static org.easymock.EasyMock.newCapture;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl;
+import org.apache.ambari.server.actionmanager.RequestFactory;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorHelper;
+import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorRequest;
+import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
+import org.apache.ambari.server.audit.AuditLogger;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.KerberosHelperImpl;
+import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider;
+import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.ArtifactDAO;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.KerberosPrincipalDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.security.encryption.CredentialStoreService;
+import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.stageplanner.RoleGraphFactory;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.topology.PersistedState;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.collections.MapUtils;
+import org.easymock.Capture;
+import org.easymock.EasyMockSupport;
+import org.easymock.IAnswer;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+
+/**
+ * Tests for {@link PreconfigureKerberosAction}: the action must skip its work
+ * on downgrades and on non-Kerberized clusters, and during an upgrade must
+ * merge preconfigure Kerberos data into core-site.
+ */
+public class PreconfigureKerberosActionTest extends EasyMockSupport {
+
+  // Cluster name shared by all mock wiring in this test class.
+  private static final String CLUSTER_NAME = "c1";
+
+  /**
+   * When the upgrade direction is DOWNGRADE the action must do nothing:
+   * only the command parameters are read, no cluster access is expected.
+   */
+  @Test
+  public void testSkipWhenDowngrade() throws Exception {
+
+    Injector injector = getInjector();
+
+    // Standard parameters, but with the direction flipped to DOWNGRADE.
+    Map<String, String> commandParams = getDefaultCommandParams();
+    commandParams.put(UPGRADE_DIRECTION_KEY, Direction.DOWNGRADE.name());
+
+    ExecutionCommand executionCommand = createMockExecutionCommand(commandParams);
+
+    replayAll();
+
+    injector.getInstance(AmbariMetaInfo.class).init();
+
+    PreconfigureKerberosAction action = injector.getInstance(PreconfigureKerberosAction.class);
+    ConcurrentMap<String, Object> context = new ConcurrentHashMap<>();
+    action.setExecutionCommand(executionCommand);
+    action.execute(context);
+
+    // verifyAll() passing with no cluster expectations set proves the skip.
+    verifyAll();
+  }
+
+  /**
+   * When the cluster's security type is not KERBEROS the action must do
+   * nothing beyond looking the cluster up.
+   */
+  @Test
+  public void testSkipWhenNotKerberos() throws Exception {
+    Injector injector = getInjector();
+
+    ExecutionCommand executionCommand = createMockExecutionCommand(getDefaultCommandParams());
+
+    // Cluster mocked with SecurityType.NONE and no hosts, services or configs.
+    Cluster cluster = createMockCluster(SecurityType.NONE, Collections.<Host>emptyList(),
+        Collections.<String, Service>emptyMap(), Collections.<String, List<ServiceComponentHost>>emptyMap(),
+        createNiceMock(StackId.class), Collections.<String, Config>emptyMap());
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    expect(clusters.getCluster(CLUSTER_NAME)).andReturn(cluster).atLeastOnce();
+
+    replayAll();
+
+    injector.getInstance(AmbariMetaInfo.class).init();
+
+    PreconfigureKerberosAction action = injector.getInstance(PreconfigureKerberosAction.class);
+    ConcurrentMap<String, Object> context = new ConcurrentHashMap<>();
+    action.setExecutionCommand(executionCommand);
+    action.execute(context);
+
+    verifyAll();
+  }
+
+  /**
+   * Builds a nice mock {@link Host} whose {@code getHostName()} always
+   * yields the supplied name.
+   */
+  private Host createMockHost(String hostname) {
+    Host mockHost = createNiceMock(Host.class);
+    expect(mockHost.getHostName()).andReturn(hostname).anyTimes();
+    return mockHost;
+  }
+
+  @Test
+  public void testUpgrade() throws Exception {
+    Capture<? extends Map<String, String>> captureCoreSiteProperties = newCapture();
+
+    Injector injector = getInjector();
+
+    ExecutionCommand executionCommand = createMockExecutionCommand(getDefaultCommandParams());
+
+    UpgradeEntity upgradeProgress = createMock(UpgradeEntity.class);
+
+    StackId targetStackId = createMock(StackId.class);
+    expect(targetStackId.getStackId()).andReturn("HDP-2.6").anyTimes();
+    expect(targetStackId.getStackName()).andReturn("HDP").anyTimes();
+    expect(targetStackId.getStackVersion()).andReturn("2.6").anyTimes();
+
+    final String hostName1 = "c6401.ambari.apache.org";
+    final String hostName2 = "c6402.ambari.apache.org";
+    final String hostName3 = "c6403.ambari.apache.org";
+
+    final Host host1 = createMockHost(hostName1);
+    Host host2 = createMockHost(hostName2);
+    Host host3 = createMockHost(hostName3);
+    Map<String, Host> hosts = new HashMap<>();
+    hosts.put(hostName1, host1);
+    hosts.put(hostName2, host2);
+    hosts.put(hostName3, host3);
+
+    Map<String, ServiceComponentHost> nnSchs = Collections.singletonMap(hostName1, createMockServiceComponentHost("HDFS", "NAMENODE", hostName1, host1));
+    Map<String, ServiceComponentHost> rmSchs = Collections.singletonMap(hostName2, createMockServiceComponentHost("YARN", "RESOURCEMANAGER", hostName2, host2));
+    Map<String, ServiceComponentHost> nmSchs = Collections.singletonMap(hostName2, createMockServiceComponentHost("YARN", "NODEMANAGER", hostName2, host2));
+    Map<String, ServiceComponentHost> dnSchs = new HashMap<>();
+    final Map<String, ServiceComponentHost> hcSchs = new HashMap<>();
+    Map<String, ServiceComponentHost> zkSSchs = new HashMap<>();
+    Map<String, ServiceComponentHost> zkCSchs = new HashMap<>();
+    Map<String, List<ServiceComponentHost>> serviceComponentHosts = new HashMap<>();
+
+    for (Map.Entry<String, Host> entry : hosts.entrySet()) {
+      String hostname = entry.getKey();
+      List<ServiceComponentHost> list = new ArrayList<>();
+      ServiceComponentHost sch;
+
+      sch = createMockServiceComponentHost("HDFS", "DATANODE", hostname, entry.getValue());
+      dnSchs.put(hostname, sch);
+      list.add(sch);
+
+      sch = createMockServiceComponentHost("HDFS", "HDFS_CLIENT", hostname, entry.getValue());
+      hcSchs.put(hostname, sch);
+      list.add(sch);
+
+      sch = createMockServiceComponentHost("ZOOKEEPER", "ZOOKEEPER_SERVER", hostname, entry.getValue());
+      zkSSchs.put(hostname, sch);
+      list.add(sch);
+
+      sch = createMockServiceComponentHost("ZOOKEEPER", "ZOOKEEPER_CLIENT", hostname, entry.getValue());
+      zkCSchs.put(hostname, sch);
+      list.add(sch);
+
+      serviceComponentHosts.put(hostname, list);
+    }
+
+
+    Map<String, ServiceComponent> hdfsComponents = new HashMap<>();
+    hdfsComponents.put("NAMENODE", createMockServiceComponent("NAMENODE", false, nnSchs));
+    hdfsComponents.put("DATANODE", createMockServiceComponent("DATANODE", false, dnSchs));
+    hdfsComponents.put("HDFS_CLIENT", createMockServiceComponent("HDFS_CLIENT", true, hcSchs));
+
+    Map<String, ServiceComponent> yarnComponents = new HashMap<>();
+    yarnComponents.put("RESOURCEMANAGER", createMockServiceComponent("RESOURCEMANAGER", false, rmSchs));
+    yarnComponents.put("NODEMANAGER", createMockServiceComponent("NODEMANAGER", false, nmSchs));
+
+    Map<String, ServiceComponent> zkCompnents = new HashMap<>();
+    yarnComponents.put("ZOOKEEPER_SERVER", createMockServiceComponent("ZOOKEEPER_SERVER", false, zkSSchs));
+    yarnComponents.put("ZOOKEEPER_CLIENT", createMockServiceComponent("ZOOKEEPER_CLIENT", true, zkCSchs));
+
+    Service hdfsService = createMockService("HDFS", hdfsComponents, targetStackId);
+    Service yarnService = createMockService("YARN", yarnComponents, targetStackId);
+    Service zkService = createMockService("ZOOKEEPER", zkCompnents, targetStackId);
+
+    Map<String, Service> installedServices = new HashMap<>();
+    installedServices.put("HDFS", hdfsService);
+    installedServices.put("YARN", yarnService);
+    installedServices.put("ZOOKEEPER", zkService);
+
+    Map<String, Map<String, String>> clusterConfig = getClusterConfig();
+
+    Map<String, Config> clusterConfigs = new HashMap<>();
+    for (Map.Entry<String, Map<String, String>> entry : clusterConfig.entrySet()) {
+      clusterConfigs.put(entry.getKey(), createMockConfig(entry.getValue()));
+    }
+
+    Cluster cluster = createMockCluster(SecurityType.KERBEROS, hosts.values(), installedServices, serviceComponentHosts, targetStackId, clusterConfigs);
+    expect(cluster.getUpgradeInProgress()).andReturn(upgradeProgress).once();
+
+    RepositoryVersionEntity targetRepositoryVersion = createMock(RepositoryVersionEntity.class);
+    expect(targetRepositoryVersion.getStackId()).andReturn(targetStackId).atLeastOnce();
+
+    UpgradeContext upgradeContext = createMock(UpgradeContext.class);
+    expect(upgradeContext.getTargetRepositoryVersion(anyString())).andReturn(targetRepositoryVersion).atLeastOnce();
+
+    UpgradeContextFactory upgradeContextFactory = injector.getInstance(UpgradeContextFactory.class);
+    expect(upgradeContextFactory.create(cluster, upgradeProgress)).andReturn(upgradeContext).once();
+
+    createMockClusters(injector, cluster);
+
+    List<PropertyInfo> knoxProperties = Arrays.asList(
+        crateMockPropertyInfo("knox-env.xml", "knox_user", "knox"),
+        crateMockPropertyInfo("knox-env.xml", "knox_group", "knox"),
+        crateMockPropertyInfo("knox-env.xml", "knox_principal_name", "KERBEROS_PRINCIPAL"),
+        crateMockPropertyInfo("gateway-site.xml", "gateway.port", "8443"),
+        crateMockPropertyInfo("gateway-site.xml", "gateway.path", "gateway")
+    );
+
+    AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    expect(ambariMetaInfo.getKerberosDescriptor("HDP", "2.6", false)).
+        andReturn(getKerberosDescriptor(false)).once();
+    expect(ambariMetaInfo.getKerberosDescriptor("HDP", "2.6", true)).
+        andReturn(getKerberosDescriptor(true)).once();
+    expect(ambariMetaInfo.isValidService("HDP", "2.6", "BEACON"))
+        .andReturn(false).anyTimes();
+    expect(ambariMetaInfo.isValidService("HDP", "2.6", "KNOX"))
+        .andReturn(true).anyTimes();
+    expect(ambariMetaInfo.getService("HDP", "2.6", "KNOX"))
+        .andReturn(createMockServiceInfo("KNOX", knoxProperties, Collections.singletonList(createMockComponentInfo("KNOX_GATEWAY")))).anyTimes();
+
+    AmbariManagementController managementController = injector.getInstance(AmbariManagementController.class);
+    expect(managementController.findConfigurationTagsWithOverrides(cluster, null))
+        .andReturn(clusterConfig).once();
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+
+    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+    expect(configHelper.getEffectiveConfigProperties(cluster, clusterConfig)).andReturn(clusterConfig).anyTimes();
+    configHelper.updateConfigType(eq(cluster), eq(targetStackId), eq(managementController), eq("core-site"), capture(captureCoreSiteProperties), anyObject(Collection.class), eq("admin"), anyString());
+    expectLastCall().once();
+
+    TopologyManager topologyManager = injector.getInstance(TopologyManager.class);
+    expect(topologyManager.getPendingHostComponents()).andReturn(Collections.<String, Collection<String>>emptyMap()).anyTimes();
+
+    StackAdvisorHelper stackAdvisorHelper = injector.getInstance(StackAdvisorHelper.class);
+    expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class)))
+        .andAnswer(new IAnswer<RecommendationResponse>() {
+          @Override
+          public RecommendationResponse answer() throws Throwable {
+            Object[] args = getCurrentArguments();
+            StackAdvisorRequest request = (StackAdvisorRequest) args[0];
+            StackAdvisorRequest.StackAdvisorRequestType requestType = request.getRequestType();
+
+            if (requestType == StackAdvisorRequest.StackAdvisorRequestType.HOST_GROUPS) {
+              RecommendationResponse.Blueprint blueprint = new RecommendationResponse.Blueprint();
+              blueprint.setHostGroups(new HashSet<>(Arrays.asList(
+                  createRecommendationHostGroup(hostName1,
+                      Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", "HDFS_CLIENT", "DATANODE", "NAMENODE", "KNOX_GATEWAY")),
+                  createRecommendationHostGroup(hostName2,
+                      Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", "HDFS_CLIENT", "DATANODE", "RESOURCEMANAGER", "NODEMANAGER")),
+                  createRecommendationHostGroup(hostName3,
+                      Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", "HDFS_CLIENT", "DATANODE"))
+              )));
+              Set<RecommendationResponse.BindingHostGroup> bindingHostGroups = new HashSet<>(Arrays.asList(
+                  createBindingHostGroup(hostName1),
+                  createBindingHostGroup(hostName2),
+                  createBindingHostGroup(hostName3)
+              ));
+
+              RecommendationResponse.BlueprintClusterBinding binding = new RecommendationResponse.BlueprintClusterBinding();
+              binding.setHostGroups(bindingHostGroups);
+
+              RecommendationResponse.Recommendation recommendation = new RecommendationResponse.Recommendation();
+              recommendation.setBlueprint(blueprint);
+              recommendation.setBlueprintClusterBinding(binding);
+
+              RecommendationResponse response = new RecommendationResponse();
+              response.setRecommendations(recommendation);
+              return response;
+            } else {
+              return null;
+            }
+          }
+        })
+        .anyTimes();
+
+    replayAll();
+
+    ambariMetaInfo.init();
+    StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
+    StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+
+    PreconfigureKerberosAction action = injector.getInstance(PreconfigureKerberosAction.class);
+    ConcurrentMap<String, Object> context = new ConcurrentHashMap<>();
+    action.setExecutionCommand(executionCommand);
+    action.execute(context);
+
+    verifyAll();
+
+    Assert.assertTrue(captureCoreSiteProperties.hasCaptured());
+
+    Map<String, String> capturedProperties = captureCoreSiteProperties.getValue();
+    Assert.assertFalse(MapUtils.isEmpty(capturedProperties));
+
+
+    String expectedAuthToLocalRules = "" +
+        "RULE:[1:$1@$0](ambari-qa-c1@EXAMPLE.COM)s/.*/ambari-qa/\n" +
+        "RULE:[1:$1@$0](hdfs-c1@EXAMPLE.COM)s/.*/hdfs/\n" +
+        "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" +
+        "RULE:[2:$1@$0](beacon@EXAMPLE.COM)s/.*/beacon/\n" +
+        "RULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\n" +
+        "RULE:[2:$1@$0](knox@EXAMPLE.COM)s/.*/knox/\n" +
+        "RULE:[2:$1@$0](nm@EXAMPLE.COM)s/.*/${yarn-env/yarn_user}/\n" +
+        "RULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\n" +
+        "RULE:[2:$1@$0](rm@EXAMPLE.COM)s/.*/${yarn-env/yarn_user}/\n" +
+        "DEFAULT";
+
+    Assert.assertEquals(3, capturedProperties.size());
+    Assert.assertEquals("users", capturedProperties.get("hadoop.proxyuser.knox.groups"));
+    Assert.assertEquals("c6401.ambari.apache.org", capturedProperties.get("hadoop.proxyuser.knox.hosts"));
+    Assert.assertEquals(expectedAuthToLocalRules, capturedProperties.get("hadoop.security.auth_to_local"));
+  }
+
+  /** Creates a blueprint binding host group containing exactly one host. */
+  private RecommendationResponse.BindingHostGroup createBindingHostGroup(String hostName) {
+    RecommendationResponse.BindingHostGroup group = new RecommendationResponse.BindingHostGroup();
+    group.setName(hostName);
+    Map<String, String> hostEntry = Collections.singletonMap("fqdn", hostName);
+    group.setHosts(Collections.singleton(hostEntry));
+    return group;
+  }
+
+  /**
+   * Creates a recommendation host group named after the host and holding the
+   * given component names.
+   */
+  private RecommendationResponse.HostGroup createRecommendationHostGroup(String hostName, List<String> components) {
+    RecommendationResponse.HostGroup hostGroup = new RecommendationResponse.HostGroup();
+    hostGroup.setName(hostName);
+
+    Set<Map<String, String>> componentDetails = new HashSet<>();
+    for (String componentName : components) {
+      componentDetails.add(Collections.singletonMap("name", componentName));
+    }
+    hostGroup.setComponents(componentDetails);
+    return hostGroup;
+  }
+
+  /** Mocks a {@link ComponentInfo} reporting the given component name. */
+  private ComponentInfo createMockComponentInfo(String componentName) {
+    ComponentInfo info = createMock(ComponentInfo.class);
+    expect(info.getName()).andReturn(componentName).anyTimes();
+    return info;
+  }
+
+  /**
+   * Builds a mock {@link PropertyInfo} for the given stack file, property name
+   * and value.
+   * NOTE(review): method name has a typo ("crate" -> "create"); renaming it
+   * would require updating all call sites, so it is only flagged here.
+   */
+  private PropertyInfo crateMockPropertyInfo(String fileName, String propertyName, String propertyValue) {
+    PropertyInfo propertyInfo = createMock(PropertyInfo.class);
+    expect(propertyInfo.getFilename()).andReturn(fileName).anyTimes();
+    expect(propertyInfo.getName()).andReturn(propertyName).anyTimes();
+    expect(propertyInfo.getValue()).andReturn(propertyValue).anyTimes();
+    return propertyInfo;
+  }
+
+  /** Mocks a {@link ServiceInfo} exposing the supplied properties and components. */
+  private ServiceInfo createMockServiceInfo(String name, List<PropertyInfo> properties, List<ComponentInfo> components) {
+    ServiceInfo info = createMock(ServiceInfo.class);
+    expect(info.getName()).andReturn(name).anyTimes();
+    expect(info.getProperties()).andReturn(properties).anyTimes();
+    expect(info.getComponents()).andReturn(components).anyTimes();
+    return info;
+  }
+
+  /**
+   * Loads the mock cluster configuration (config type -> property -> value)
+   * from the PreconfigureActionTest_cluster_config.json test resource.
+   */
+  private Map<String, Map<String, String>> getClusterConfig() throws URISyntaxException, FileNotFoundException {
+    URL resource = ClassLoader.getSystemResource("PreconfigureActionTest_cluster_config.json");
+    FileReader reader = new FileReader(new File(resource.toURI()));
+    java.lang.reflect.Type mapType = new TypeToken<Map<String, Map<String, String>>>() {}.getType();
+    return new Gson().fromJson(reader, mapType);
+  }
+
+  /**
+   * Loads a Kerberos descriptor from a test resource, either with or without
+   * the preconfigure data.
+   */
+  private KerberosDescriptor getKerberosDescriptor(boolean includePreconfigureData) throws URISyntaxException, IOException {
+    String resourceName = includePreconfigureData
+        ? "PreconfigureActionTest_kerberos_descriptor_stack_preconfigure.json"
+        : "PreconfigureActionTest_kerberos_descriptor_stack.json";
+    URL url = ClassLoader.getSystemResource(resourceName);
+    return new KerberosDescriptorFactory().createInstance(new File(url.toURI()));
+  }
+
+  /**
+   * Mocks a {@link ServiceComponent} with the given name, client flag and
+   * host-to-SCH map; individual host lookups and the full map are both stubbed.
+   */
+  private ServiceComponent createMockServiceComponent(String name, Boolean isClientComponent, Map<String, ServiceComponentHost> serviceComponentHostMap) throws AmbariException {
+    ServiceComponent serviceComponent = createMock(ServiceComponent.class);
+    expect(serviceComponent.getName()).andReturn(name).anyTimes();
+    expect(serviceComponent.isClientComponent()).andReturn(isClientComponent).anyTimes();
+
+    // Use entry.getValue() directly instead of re-looking the key up in the map.
+    for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHostMap.entrySet()) {
+      expect(serviceComponent.getServiceComponentHost(entry.getKey())).andReturn(entry.getValue()).anyTimes();
+    }
+
+    expect(serviceComponent.getServiceComponentHosts()).andReturn(serviceComponentHostMap).anyTimes();
+
+    return serviceComponent;
+  }
+
+  /** Mocks a {@link ServiceComponentHost} that reports INSERVICE admin state. */
+  private ServiceComponentHost createMockServiceComponentHost(String serviceName, String componentName, String hostname, Host host) {
+    ServiceComponentHost mockSch = createMock(ServiceComponentHost.class);
+    expect(mockSch.getServiceName()).andReturn(serviceName).anyTimes();
+    expect(mockSch.getServiceComponentName()).andReturn(componentName).anyTimes();
+    expect(mockSch.getHostName()).andReturn(hostname).anyTimes();
+    expect(mockSch.getHost()).andReturn(host).anyTimes();
+    expect(mockSch.getComponentAdminState()).andReturn(HostComponentAdminState.INSERVICE).anyTimes();
+    return mockSch;
+  }
+
+  /** Mocks a {@link Service} exposing the given components and desired stack. */
+  private Service createMockService(String name, Map<String, ServiceComponent> components, StackId desiredStackId) {
+    Service mockService = createMock(Service.class);
+    expect(mockService.getName()).andReturn(name).anyTimes();
+    expect(mockService.getServiceComponents()).andReturn(components).anyTimes();
+    expect(mockService.getDesiredStackId()).andReturn(desiredStackId).anyTimes();
+    return mockService;
+  }
+
+  /** Wires the injector's {@link Clusters} mock to return the given cluster for CLUSTER_NAME. */
+  private Clusters createMockClusters(Injector injector, Cluster cluster) throws AmbariException {
+    Clusters mockClusters = injector.getInstance(Clusters.class);
+    expect(mockClusters.getCluster(CLUSTER_NAME)).andReturn(cluster).atLeastOnce();
+    return mockClusters;
+  }
+
+  /**
+   * Builds a mock {@link Cluster} wired with the given security type, hosts,
+   * services, per-host component lists, stack and desired configs.  Also stubs
+   * config-type-to-service lookups (including types owned by services that are
+   * not installed, e.g. KNOX and RANGER) and the component-host map queries for
+   * the service sets with and without KNOX.
+   */
+  private Cluster createMockCluster(SecurityType securityType, Collection<Host> hosts,
+                                    Map<String, Service> services,
+                                    Map<String, List<ServiceComponentHost>> serviceComponentHosts,
+                                    StackId currentStackId, final Map<String, Config> clusterConfigs) {
+    final Cluster cluster = createMock(Cluster.class);
+    expect(cluster.getSecurityType()).andReturn(securityType).anyTimes();
+    expect(cluster.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
+    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+    expect(cluster.getHosts()).andReturn(hosts).anyTimes();
+    expect(cluster.getServices()).andReturn(services).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(currentStackId).anyTimes();
+
+    for (Map.Entry<String, List<ServiceComponentHost>> entry : serviceComponentHosts.entrySet()) {
+      expect(cluster.getServiceComponentHosts(entry.getKey())).andReturn(entry.getValue()).atLeastOnce();
+    }
+
+    // Component-host map lookups for the service sets with and without KNOX.
+    expect(cluster.getServiceComponentHostMap(null, new HashSet<>(Arrays.asList("HDFS", "ZOOKEEPER", "YARN", "KNOX"))))
+        .andReturn(null)
+        .anyTimes();
+    expect(cluster.getServiceComponentHostMap(null, new HashSet<>(Arrays.asList("HDFS", "ZOOKEEPER", "YARN"))))
+        .andReturn(null)
+        .anyTimes();
+
+    // Maps config types to owning services; a null value marks a cluster-wide
+    // type (cluster-env).
+    Map<String, String> configTypeService = new HashMap<>();
+    configTypeService.put("hdfs-site", "HDFS");
+    configTypeService.put("core-site", "HDFS");
+    configTypeService.put("hadoop-env", "HDFS");
+    configTypeService.put("cluster-env", null);
+    configTypeService.put("kerberos-env", "KERBEROS");
+    configTypeService.put("ranger-hdfs-audit", "RANGER");
+    configTypeService.put("zookeeper-env", "ZOOKEEPER");
+    configTypeService.put("gateway-site", "KNOX");
+
+    for (Map.Entry<String, String> entry : configTypeService.entrySet()) {
+      expect(cluster.getServiceByConfigType(entry.getKey())).andReturn(entry.getValue()).anyTimes();
+    }
+
+    for (Map.Entry<String, Config> entry : clusterConfigs.entrySet()) {
+      expect(cluster.getDesiredConfigByType(entry.getKey())).andReturn(entry.getValue()).anyTimes();
+      expect(cluster.getConfigsByType(entry.getKey())).andReturn(Collections.singletonMap(entry.getKey(), entry.getValue())).anyTimes();
+      expect(cluster.getConfigPropertiesTypes(entry.getKey())).andReturn(Collections.<PropertyInfo.PropertyType, Set<String>>emptyMap()).anyTimes();
+    }
+
+    return cluster;
+  }
+
+  /** Mocks a {@link Config} with the given properties and no attribute overrides. */
+  private Config createMockConfig(Map<String, String> properties) {
+    Config mockConfig = createMock(Config.class);
+    expect(mockConfig.getProperties()).andReturn(properties).anyTimes();
+    Map<String, Map<String, String>> noAttributes = Collections.emptyMap();
+    expect(mockConfig.getPropertiesAttributes()).andReturn(noAttributes).anyTimes();
+    return mockConfig;
+  }
+
+  /** Command parameters common to every test: cluster name plus UPGRADE direction. */
+  private Map<String, String> getDefaultCommandParams() {
+    Map<String, String> params = new HashMap<>();
+    params.put("clusterName", CLUSTER_NAME);
+    params.put(UPGRADE_DIRECTION_KEY, Direction.UPGRADE.name());
+    return params;
+  }
+
+  /** Mocks an {@link ExecutionCommand} that serves the given command parameters. */
+  private ExecutionCommand createMockExecutionCommand(Map<String, String> commandParams) {
+    ExecutionCommand command = createMock(ExecutionCommand.class);
+    expect(command.getCommandParams()).andReturn(commandParams).atLeastOnce();
+    return command;
+  }
+
+  /**
+   * Builds a Guice injector supplying the mocks required by
+   * {@link PreconfigureKerberosAction} and its {@link KerberosHelperImpl}
+   * dependency graph.
+   */
+  private Injector getInjector() {
+    return Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(EntityManager.class).toInstance(createMock(EntityManager.class));
+        bind(DBAccessor.class).toInstance(createMock(DBAccessor.class));
+        bind(UpgradeContextFactory.class).toInstance(createMock(UpgradeContextFactory.class));
+        bind(OsFamily.class).toInstance(createMock(OsFamily.class));
+        bind(StackManagerFactory.class).toInstance(createMock(StackManagerFactory.class));
+        bind(StageFactory.class).toInstance(createMock(StageFactory.class));
+        bind(AmbariMetaInfo.class).toInstance(createMock(AmbariMetaInfo.class));
+        bind(AmbariCustomCommandExecutionHelper.class).toInstance(createMock(AmbariCustomCommandExecutionHelper.class));
+        bind(ActionManager.class).toInstance(createMock(ActionManager.class));
+        bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
+        bind(AuditLogger.class).toInstance(createNiceMock(AuditLogger.class));
+        bind(ArtifactDAO.class).toInstance(createNiceMock(ArtifactDAO.class));
+        bind(KerberosPrincipalDAO.class).toInstance(createNiceMock(KerberosPrincipalDAO.class));
+        // Each key is bound exactly once; the original repeated the next two
+        // bindings, which is redundant.
+        bind(RoleCommandOrderProvider.class).to(CachedRoleCommandOrderProvider.class);
+        bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class);
+        bind(RoleGraphFactory.class).toInstance(createMock(RoleGraphFactory.class));
+        bind(RequestFactory.class).toInstance(createMock(RequestFactory.class));
+        bind(RequestExecutionFactory.class).toInstance(createMock(RequestExecutionFactory.class));
+        bind(CredentialStoreService.class).toInstance(createMock(CredentialStoreService.class));
+        bind(TopologyManager.class).toInstance(createNiceMock(TopologyManager.class));
+        bind(ConfigFactory.class).toInstance(createMock(ConfigFactory.class));
+        bind(PersistedState.class).toInstance(createMock(PersistedState.class));
+        bind(ConfigureClusterTaskFactory.class).toInstance(createNiceMock(ConfigureClusterTaskFactory.class));
+        bind(Configuration.class).toInstance(new Configuration(new Properties()));
+
+        bind(AmbariManagementController.class).toInstance(createMock(AmbariManagementController.class));
+        bind(KerberosHelper.class).to(KerberosHelperImpl.class);
+        bind(Clusters.class).toInstance(createMock(Clusters.class));
+        bind(StackAdvisorHelper.class).toInstance(createMock(StackAdvisorHelper.class));
+        bind(ConfigHelper.class).toInstance(createMock(ConfigHelper.class));
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json b/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json
new file mode 100644
index 0000000..2a744c7
--- /dev/null
+++ b/ambari-server/src/test/resources/PreconfigureActionTest_cluster_config.json
@@ -0,0 +1,110 @@
+{
+  "core-site": {
+    "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+    "fs.trash.interval": "360",
+    "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
+    "hadoop.custom-extensions.root": "/hdp/ext/{{major_stack_version}}/hadoop",
+    "hadoop.http.authentication.simple.anonymous.allowed": "true",
+    "hadoop.proxyuser.hdfs.groups": "*",
+    "hadoop.proxyuser.hdfs.hosts": "*",
+    "hadoop.proxyuser.root.groups": "*",
+    "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org",
+    "hadoop.security.auth_to_local": "DEFAULT",
+    "hadoop.security.authentication": "simple",
+    "hadoop.security.authorization": "false",
+    "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+    "io.file.buffer.size": "131072",
+    "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+    "ipc.client.connect.max.retries": "50",
+    "ipc.client.connection.maxidletime": "30000",
+    "ipc.client.idlethreshold": "8000",
+    "ipc.server.tcpnodelay": "true",
+    "mapreduce.jobtracker.webinterface.trusted": "false",
+    "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py"
+  },
+  "hadoop-env": {
+    "dtnode_heapsize": "1024m",
+    "hadoop_heapsize": "1024",
+    "hadoop_pid_dir_prefix": "/var/run/hadoop",
+    "hadoop_root_logger": "INFO,RFA",
+    "hdfs_log_dir_prefix": "/var/log/hadoop",
+    "hdfs_principal_name": "hdfs-c1@EXAMPLE.COM",
+    "hdfs_tmp_dir": "/tmp",
+    "hdfs_user": "hdfs",
+    "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+    "hdfs_user_nofile_limit": "128000",
+    "hdfs_user_nproc_limit": "65536",
+    "keyserver_host": " ",
+    "keyserver_port": "",
+    "namenode_backup_dir": "/tmp/upgrades",
+    "namenode_heapsize": "1024m",
+    "namenode_opt_maxnewsize": "128m",
+    "namenode_opt_maxpermsize": "256m",
+    "namenode_opt_newsize": "128m",
+    "namenode_opt_permsize": "128m",
+    "nfsgateway_heapsize": "1024",
+    "proxyuser_group": "users"
+  },
+  "cluster-env" : {
+    "agent_mounts_ignore_list": "",
+    "alerts_repeat_tolerance": "1",
+    "ambari_principal_name": "ambari-server-c1@EXAMPLE.COM",
+    "enable_external_ranger": "false",
+    "fetch_nonlocal_groups": "true",
+    "hide_yarn_memory_widget": "false",
+    "ignore_bad_mounts": "false",
+    "ignore_groupsusers_create": "false",
+    "kerberos_domain": "EXAMPLE.COM",
+    "manage_dirs_on_root": "true",
+    "managed_hdfs_resource_property_names": "",
+    "one_dir_per_partition": "false",
+    "override_uid": "true",
+    "recovery_enabled": "false",
+    "recovery_lifetime_max_count": "1024",
+    "recovery_max_count": "6",
+    "recovery_retry_interval": "5",
+    "recovery_type": "AUTO_START",
+    "recovery_window_in_minutes": "60",
+    "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}",
+    "security_enabled": "true",
+    "smokeuser": "ambari-qa",
+    "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+    "smokeuser_principal_name": "ambari-qa-c1@EXAMPLE.COM",
+    "stack_name": "HDP",
+    "sysprep_skip_copy_fast_jar_hdfs": "false",
+    "sysprep_skip_copy_oozie_share_lib_to_hdfs": "false",
+    "sysprep_skip_copy_tarballs_hdfs": "false",
+    "sysprep_skip_create_users_and_groups": "false",
+    "sysprep_skip_setup_jce": "false",
+    "user_group": "hadoop"
+  },
+  "kerberos-env" : {
+    "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}",
+    "admin_server_host": "c6401",
+    "case_insensitive_username_rules": "false",
+    "container_dn": "",
+    "create_ambari_principal": "true",
+    "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
+    "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
+    "group": "ambari-managed-principals",
+    "install_packages": "true",
+    "kdc_create_attributes": "",
+    "kdc_hosts": "c6401",
+    "kdc_type": "mit-kdc",
+    "ldap_url": "",
+    "manage_auth_to_local": "true",
+    "manage_identities": "true",
+    "master_kdc": "",
+    "password_chat_timeout": "5",
+    "password_length": "20",
+    "password_min_digits": "1",
+    "password_min_lowercase_letters": "1",
+    "password_min_punctuation": "1",
+    "password_min_uppercase_letters": "1",
+    "password_min_whitespace": "0",
+    "preconfigure_services": "DEFAULT",
+    "realm": "EXAMPLE.COM",
+    "service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
+    "set_password_expiry": "false"
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/5230d935/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json b/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json
new file mode 100644
index 0000000..ddd00bd
--- /dev/null
+++ b/ambari-server/src/test/resources/PreconfigureActionTest_kerberos_descriptor_stack.json
@@ -0,0 +1,713 @@
+{
+  "identities": [
+    {
+      "keytab": {
+        "configuration": "cluster-env/smokeuser_keytab",
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        },
+        "owner": {
+          "access": "r",
+          "name": "${cluster-env/smokeuser}"
+        }
+      },
+      "name": "smokeuser",
+      "principal": {
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}",
+        "type": "user",
+        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}"
+      }
+    },
+    {
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        },
+        "owner": {
+          "access": "r",
+          "name": "root"
+        }
+      },
+      "name": "spnego",
+      "principal": {
+        "configuration": null,
+        "local_username": null,
+        "type": "service",
+        "value": "HTTP/_HOST@${realm}"
+      }
+    }
+  ],
+  "services": [
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "keytab": {
+                "file": "${keytab_dir}/ambari.server.keytab",
+                "group": {},
+                "owner": {
+                  "access": "r"
+                }
+              },
+              "name": "ambari-server",
+              "principal": {
+                "configuration": "cluster-env/ambari_principal_name",
+                "local_username": null,
+                "type": "user",
+                "value": "ambari-server${principal_suffix}@${realm}"
+              }
+            },
+            {
+              "name": "ambari-server_spnego",
+              "reference": "/spnego"
+            }
+          ],
+          "name": "AMBARI_SERVER"
+        }
+      ],
+      "name": "AMBARI"
+    },
+    {
+      "auth_to_local_properties": [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "components": [
+        {
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.datanode.address": "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.datanode.keytab.file",
+                "file": "${keytab_dir}/dn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "datanode_dn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "dn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "DATANODE"
+        },
+        {
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ],
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file",
+                "file": "${keytab_dir}/jn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "journalnode_jn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "jn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "JOURNALNODE"
+        },
+        {
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": "${keytab_dir}/nn.service.keytab"
+              },
+              "name": "/HDFS/NAMENODE/namenode_nn",
+              "principal": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": "nn/_HOST@${realm}"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hadoop-env/hdfs_user_keytab",
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "hdfs",
+              "principal": {
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "user",
+                "value": "${hadoop-env/hdfs_user}${principal_suffix}@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.namenode.keytab.file",
+                "file": "${keytab_dir}/nn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "namenode_nn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NAMENODE"
+        },
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hdfs-site/nfs.keytab.file",
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "nfsgateway",
+              "principal": {
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nfs/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NFS_GATEWAY"
+        },
+        {
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file",
+                "file": "${keytab_dir}/nn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${hadoop-env/hdfs_user}"
+                }
+              },
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username": "${hadoop-env/hdfs_user}",
+                "type": "service",
+                "value": "nn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "SECONDARY_NAMENODE"
+        }
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "ha.zookeeper.acl": "sasl:nn:rwcda",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}",
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+          }
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab",
+            "file": "${keytab_dir}/spnego.service.keytab"
+          },
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal",
+            "local_username": null,
+            "type": null,
+            "value": "HTTP/_HOST@${realm}"
+          }
+        }
+      ],
+      "name": "HDFS"
+    },
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.keytab",
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "yarn/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "APP_TIMELINE_SERVER"
+        },
+        {
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file",
+                "file": null
+              },
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal",
+                "local_username": null,
+                "type": null,
+                "value": null
+              },
+              "when": {
+                "contains": [
+                  "services",
+                  "HIVE"
+                ]
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file",
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "group": {
+                  "access": "r",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "llap_zk_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal",
+                "local_username": null,
+                "type": "service",
+                "value": "hive/_HOST@${realm}"
+              },
+              "when": {
+                "contains": [
+                  "services",
+                  "HIVE"
+                ]
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.keytab",
+                "file": "${keytab_dir}/nm.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "nodemanager_nm",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "nm/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "NODEMANAGER"
+        },
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": "${keytab_dir}/rm.service.keytab"
+              },
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": "rm/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file",
+                "file": "${keytab_dir}/spnego.service.keytab"
+              },
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal",
+                "local_username": null,
+                "type": null,
+                "value": "HTTP/_HOST@${realm}"
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.keytab",
+                "file": "${keytab_dir}/rm.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${yarn-env/yarn_user}"
+                }
+              },
+              "name": "resource_manager_rm",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}",
+                "type": "service",
+                "value": "rm/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "RESOURCEMANAGER"
+        }
+      ],
+      "configurations": [
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+          }
+        },
+        {
+          "yarn-site": {
+            "hadoop.registry.client.auth": "kerberos",
+            "hadoop.registry.jaas.context": "Client",
+            "hadoop.registry.secure": "true",
+            "hadoop.registry.system.accounts": "sasl:${principals/YARN/APP_TIMELINE_SERVER/app_timeline_server_yarn|principalPrimary()},sasl:${principals/MAPREDUCE2/HISTORYSERVER/history_server_jhs|principalPrimary()},sasl:${principals/HDFS/NAMENODE/hdfs|principalPrimary()},sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()},sasl:${principals/HIVE/HIVE_SERVER/hive_server_hive|principalPrimary()}",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${activity-conf/global.activity.analyzer.user},dr.who,${yarn-env/yarn_user}",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.zk-acl": "sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()}:rwcda",
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.type": "kerberos"
+          }
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "/spnego"
+        }
+      ],
+      "name": "YARN"
+    },
+    {
+      "components": [
+        {
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "gateway-site": {
+                "gateway.hadoop.kerberos.secured": "true",
+                "java.security.krb5.conf": "/etc/krb5.conf"
+              }
+            },
+            {
+              "oozie-site": {
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "ranger-knox-audit": {
+                "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
+                "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+                "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+                "xasecure.audit.jaas.Client.option.serviceName": "solr",
+                "xasecure.audit.jaas.Client.option.storeKey": "false",
+                "xasecure.audit.jaas.Client.option.useKeyTab": "true"
+              }
+            },
+            {
+              "webhcat-site": {
+                "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            }
+          ],
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.keyTab",
+                "file": null
+              },
+              "name": "/KNOX/KNOX_GATEWAY/knox_principal",
+              "principal": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.principal",
+                "local_username": null,
+                "type": null,
+                "value": null
+              }
+            },
+            {
+              "keytab": {
+                "configuration": "knox-env/knox_keytab_path",
+                "file": "${keytab_dir}/knox.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${knox-env/knox_user}"
+                }
+              },
+              "name": "knox_principal",
+              "principal": {
+                "configuration": "knox-env/knox_principal_name",
+                "local_username": "${knox-env/knox_user}",
+                "type": "service",
+                "value": "${knox-env/knox_user}/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "KNOX_GATEWAY"
+        }
+      ],
+      "name": "KNOX"
+    },
+    {
+      "components": [
+        {
+          "identities": [
+            {
+              "keytab": {
+                "configuration": "zookeeper-env/zookeeper_keytab_path",
+                "file": "${keytab_dir}/zk.service.keytab",
+                "group": {
+                  "access": "",
+                  "name": "${cluster-env/user_group}"
+                },
+                "owner": {
+                  "access": "r",
+                  "name": "${zookeeper-env/zk_user}"
+                }
+              },
+              "name": "zookeeper_zk",
+              "principal": {
+                "configuration": "zookeeper-env/zookeeper_principal_name",
+                "local_username": null,
+                "type": "service",
+                "value": "zookeeper/_HOST@${realm}"
+              }
+            }
+          ],
+          "name": "ZOOKEEPER_SERVER"
+        }
+      ],
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "name": "ZOOKEEPER"
+    }
+  ],
+  "properties": {
+    "additional_realms": "",
+    "keytab_dir": "/etc/security/keytabs",
+    "principal_suffix": "-${cluster_name|toLower()}",
+    "realm": "EXAMPLE.COM"
+  }
+}
\ No newline at end of file