Posted to commits@oozie.apache.org by vi...@apache.org on 2013/02/25 22:42:09 UTC

svn commit: r1449911 [7/8] - in /oozie/trunk: ./ client/src/main/java/org/apache/oozie/cli/ client/src/main/java/org/apache/oozie/client/ client/src/main/java/org/apache/oozie/client/rest/ client/src/test/java/org/apache/oozie/client/rest/ core/ core/s...

Modified: oozie/trunk/core/src/test/java/org/apache/oozie/service/TestRecoveryService.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/service/TestRecoveryService.java?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/service/TestRecoveryService.java (original)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/service/TestRecoveryService.java Mon Feb 25 21:42:07 2013
@@ -25,6 +25,8 @@ import java.io.PrintWriter;
 import java.io.Reader;
 import java.io.StringReader;
 import java.io.Writer;
+import java.net.URI;
+import java.util.Collection;
 import java.util.Date;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
@@ -52,9 +54,11 @@ import org.apache.oozie.client.WorkflowJ
 import org.apache.oozie.client.CoordinatorJob.Execution;
 import org.apache.oozie.command.wf.ActionXCommand;
 import org.apache.oozie.command.wf.ActionXCommand.ActionExecutorContext;
+import org.apache.oozie.coord.CoordELFunctions;
+import org.apache.oozie.dependency.FSURIHandler;
+import org.apache.oozie.dependency.HCatURIHandler;
 import org.apache.oozie.executor.jpa.CoordActionGetJPAExecutor;
 import org.apache.oozie.executor.jpa.CoordActionInsertJPAExecutor;
-import org.apache.oozie.executor.jpa.CoordJobInsertJPAExecutor;
 import org.apache.oozie.executor.jpa.JPAExecutorException;
 import org.apache.oozie.executor.jpa.WorkflowActionGetJPAExecutor;
 import org.apache.oozie.executor.jpa.WorkflowActionInsertJPAExecutor;
@@ -65,6 +69,7 @@ import org.apache.oozie.store.StoreExcep
 import org.apache.oozie.store.WorkflowStore;
 import org.apache.oozie.test.XDataTestCase;
 import org.apache.oozie.util.DateUtils;
+import org.apache.oozie.util.HCatURI;
 import org.apache.oozie.util.IOUtils;
 import org.apache.oozie.util.XConfiguration;
 import org.apache.oozie.util.XLog;
@@ -73,15 +78,18 @@ import org.apache.oozie.workflow.Workflo
 
 public class TestRecoveryService extends XDataTestCase {
     private Services services;
+    private String server;
 
     @Override
     protected void setUp() throws Exception {
         super.setUp();
+        server = getMetastoreAuthority();
         setSystemProperty(SchemaService.WF_CONF_EXT_SCHEMAS, "wf-ext-schema.xsd");
         services = new Services();
         services.init();
         cleanUpDBTables();
         services.get(ActionService.class).register(ForTestingActionExecutor.class);
+
     }
 
     @Override
@@ -186,9 +194,9 @@ public class TestRecoveryService extends
         store3.commitTrx();
         store3.closeTrx();
     }
-    
+
     /**
-     * Tests functionality of the Recovery Service Runnable command. </p> Starts an action with USER_RETRY status. 
+     * Tests functionality of the Recovery Service Runnable command. </p> Starts an action with USER_RETRY status.
      * Runs the recovery runnable, and ensures the state changes to OK and the job completes successfully.
      *
      * @throws Exception
@@ -197,11 +205,11 @@ public class TestRecoveryService extends
         final JPAService jpaService = Services.get().get(JPAService.class);
         WorkflowJobBean job = this.addRecordToWfJobTable(WorkflowJob.Status.RUNNING, WorkflowInstance.Status.RUNNING);
         WorkflowActionBean action = this.addRecordToWfActionTable(job.getId(), "1", WorkflowAction.Status.USER_RETRY);
-        
+
         Runnable recoveryRunnable = new RecoveryRunnable(0, 60, 60);
         recoveryRunnable.run();
         sleep(3000);
-        
+
         final WorkflowActionGetJPAExecutor wfActionGetCmd = new WorkflowActionGetJPAExecutor(action.getId());
 
         waitFor(5000, new Predicate() {
@@ -233,7 +241,7 @@ public class TestRecoveryService extends
         assertTrue(launcherJob.isSuccessful());
         assertTrue(LauncherMapper.hasIdSwap(launcherJob));
     }
-    
+
 
     /**
      * Tests functionality of the Recovery Service Runnable command. </p> Insert a coordinator job with RUNNING and
@@ -294,11 +302,8 @@ public class TestRecoveryService extends
      */
     public void testCoordActionRecoveryServiceForWaiting() throws Exception {
 
-        String currentDatePlusMonth = XDataTestCase.getCurrentDateafterIncrementingInMonths(1);
-        Date startTime = DateUtils.parseDateOozieTZ(currentDatePlusMonth);
-        Date endTime = DateUtils.parseDateOozieTZ(currentDatePlusMonth);
         CoordinatorJobBean job = addRecordToCoordJobTableForWaiting("coord-job-for-action-input-check.xml",
-                CoordinatorJob.Status.RUNNING, startTime, endTime, false, true, 0);
+                CoordinatorJob.Status.RUNNING, false, true);
 
         CoordinatorActionBean action = addRecordToCoordActionTableForWaiting(job.getId(), 1,
                 CoordinatorAction.Status.WAITING, "coord-action-for-action-input-check.xml");
@@ -332,6 +337,72 @@ public class TestRecoveryService extends
         }
     }
 
+
+    public void testCoordActionRecoveryServiceForWaitingRegisterPartition() throws Exception {
+        services.destroy();
+        services = super.setupServicesForHCatalog();
+        services.getConf().set(URIHandlerService.URI_HANDLERS,
+                FSURIHandler.class.getName() + "," + HCatURIHandler.class.getName());
+        services.getConf().setLong(RecoveryService.CONF_PUSH_DEPENDENCY_INTERVAL, 1);
+        services.init();
+
+        String db = "default";
+        String table = "tablename";
+
+        // dep1 is not available and dep2 is available
+        String newHCatDependency1 = "hcat://" + server + "/" + db + "/" + table + "/dt=20120430;country=brazil";
+        String newHCatDependency2 = "hcat://" + server + "/" + db + "/" + table + "/dt=20120430;country=usa";
+        String newHCatDependency = newHCatDependency1 + CoordELFunctions.INSTANCE_SEPARATOR + newHCatDependency2;
+
+        HCatAccessorService hcatService = services.get(HCatAccessorService.class);
+        JMSAccessorService jmsService = services.get(JMSAccessorService.class);
+        assertFalse(jmsService.isListeningToTopic(hcatService.getJMSConnectionInfo(new URI(newHCatDependency1)), db
+                + "." + table));
+
+        populateTable(db, table);
+        String actionId = addInitRecords(newHCatDependency);
+        CoordinatorAction ca = checkCoordActionDependencies(actionId, newHCatDependency);
+        assertEquals(CoordinatorAction.Status.WAITING, ca.getStatus());
+        sleep(2000);
+        Runnable recoveryRunnable = new RecoveryRunnable(0, 1, 1);
+        recoveryRunnable.run();
+        sleep(2000);
+
+        // Recovery service should have discovered newHCatDependency2 and JMS Connection should exist
+        // and newHCatDependency1 should be in PDMS waiting list
+        assertTrue(jmsService.isListeningToTopic(hcatService.getJMSConnectionInfo(new URI(newHCatDependency2)), "hcat."
+                + db + "." + table));
+        checkCoordActionDependencies(actionId, newHCatDependency1);
+
+        PartitionDependencyManagerService pdms = services.get(PartitionDependencyManagerService.class);
+        assertNull(pdms.getWaitingActions(new HCatURI(newHCatDependency2)));
+        Collection<String> waitingActions = pdms.getWaitingActions(new HCatURI(newHCatDependency1));
+        assertEquals(1, waitingActions.size());
+        assertTrue(waitingActions.contains(actionId));
+    }
+
+    private void populateTable(String db, String table) throws Exception {
+        dropTable(db, table, true);
+        dropDatabase(db, true);
+        createDatabase(db);
+        createTable(db, table, "dt,country");
+        addPartition(db, table, "dt=20120430;country=usa");
+        addPartition(db, table, "dt=20120412;country=brazil");
+        addPartition(db, table, "dt=20120413;country=brazil");
+    }
+
+    private CoordinatorActionBean checkCoordActionDependencies(String actionId, String expDeps) throws Exception {
+        try {
+            JPAService jpaService = Services.get().get(JPAService.class);
+            CoordinatorActionBean action = jpaService.execute(new CoordActionGetJPAExecutor(actionId));
+            assertEquals(expDeps, action.getPushMissingDependencies());
+            return action;
+        }
+        catch (JPAExecutorException se) {
+            throw new Exception("Action ID " + actionId + " was not stored properly in db");
+        }
+    }
+
     /**
      * Tests functionality of the Recovery Service Runnable command. </p> Insert a coordinator job with SUSPENDED and
      * action with SUSPENDED and workflow with RUNNING. Then, runs the recovery runnable and ensures the workflow status changes to SUSPENDED.
@@ -441,29 +512,6 @@ public class TestRecoveryService extends
         assertEquals(WorkflowJob.Status.RUNNING, ret.getStatus());
     }
 
-    protected CoordinatorJobBean addRecordToCoordJobTableForWaiting(String testFileName, CoordinatorJob.Status status, Date start, Date end,
-            boolean pending, boolean doneMatd, int lastActionNum) throws Exception {
-
-        String testDir = getTestCaseDir();
-        CoordinatorJobBean coordJob = createCoordJob(testFileName, status, start, end, pending, doneMatd, lastActionNum);
-        String appXml = getCoordJobXmlForWaiting(testFileName, testDir);
-        coordJob.setJobXml(appXml);
-
-        try {
-            JPAService jpaService = Services.get().get(JPAService.class);
-            assertNotNull(jpaService);
-            CoordJobInsertJPAExecutor coordInsertCmd = new CoordJobInsertJPAExecutor(coordJob);
-            jpaService.execute(coordInsertCmd);
-        }
-        catch (JPAExecutorException je) {
-            je.printStackTrace();
-            fail("Unable to insert the test coord job record to table");
-            throw je;
-        }
-
-        return coordJob;
-    }
-
     protected CoordinatorActionBean addRecordToCoordActionTableForWaiting(String jobId, int actionNum,
             CoordinatorAction.Status status, String resourceXmlName) throws Exception {
         CoordinatorActionBean action = createCoordAction(jobId, actionNum, status, resourceXmlName, 0);
@@ -500,18 +548,6 @@ public class TestRecoveryService extends
         }
     }
 
-    protected String getCoordJobXmlForWaiting(String testFileName, String testDir) {
-        try {
-            Reader reader = IOUtils.getResourceAsReader(testFileName, -1);
-            String appXml = IOUtils.getReaderAsString(reader, -1);
-            appXml = appXml.replaceAll("#testDir", testDir);
-            return appXml;
-        }
-        catch (IOException ioe) {
-            throw new RuntimeException(XLog.format("Could not get "+ testFileName, ioe));
-        }
-    }
-
     private void addRecordToActionTable(String jobId, int actionNum, String actionId, CoordinatorStore store, String baseDir) throws StoreException, IOException {
         CoordinatorActionBean action = new CoordinatorActionBean();
         action.setJobId(jobId);
@@ -699,7 +735,7 @@ public class TestRecoveryService extends
             throw se;
         }
     }
-    
+
     @Override
     protected WorkflowActionBean addRecordToWfActionTable(String wfId, String actionName, WorkflowAction.Status status)
             throws Exception {

Modified: oozie/trunk/core/src/test/java/org/apache/oozie/servlet/TestJobsServlet.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/servlet/TestJobsServlet.java?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/servlet/TestJobsServlet.java (original)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/servlet/TestJobsServlet.java Mon Feb 25 21:42:07 2013
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *      http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

Added: oozie/trunk/core/src/test/java/org/apache/oozie/test/MiniHCatServer.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/test/MiniHCatServer.java?rev=1449911&view=auto
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/test/MiniHCatServer.java (added)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/test/MiniHCatServer.java Mon Feb 25 21:42:07 2013
@@ -0,0 +1,301 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.oozie.test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hcatalog.api.HCatAddPartitionDesc;
+import org.apache.hcatalog.api.HCatClient;
+import org.apache.hcatalog.api.HCatClient.DropDBMode;
+import org.apache.hcatalog.api.HCatCreateDBDesc;
+import org.apache.hcatalog.api.HCatCreateTableDesc;
+import org.apache.hcatalog.api.HCatPartition;
+import org.apache.hcatalog.common.HCatConstants;
+import org.apache.hcatalog.data.schema.HCatFieldSchema;
+import org.apache.hcatalog.data.schema.HCatFieldSchema.Type;
+import org.apache.oozie.util.HCatURI;
+import org.apache.oozie.util.XLog;
+import org.junit.Assert;
+
+public class MiniHCatServer {
+
+    public static enum RUNMODE {
+        LOCAL, SERVER
+    };
+
+    private static XLog LOG = XLog.getLog(MiniHCatServer.class);
+    private static final Random RANDOM = new Random();
+    private RUNMODE mode;
+    private Configuration hadoopConf;
+    private int msPort;
+    private HiveConf hiveConf;
+    private HCatClient hcatClient;
+    private Thread serverThread;
+    private Map<String, String> sysProps;
+
+    public MiniHCatServer(RUNMODE mode, Configuration hadoopConf) throws Exception {
+        this.mode = mode;
+        this.hadoopConf = hadoopConf;
+        sysProps = new HashMap<String, String>();
+    }
+
+    public void start() throws Exception {
+        if (mode.equals(RUNMODE.LOCAL)) {
+            initLocalMetastoreConf();
+        }
+        else {
+            this.msPort = RANDOM.nextInt(100) + 30000;
+            startMetastoreServer();
+            initMetastoreServerConf();
+        }
+        hcatClient = HCatClient.create(hiveConf);
+        resetDefaultDBCreation();
+    }
+
+    public void shutdown() throws Exception {
+        resetSystemProperties();
+        hcatClient.close();
+        if (mode.equals(RUNMODE.SERVER)) {
+            // No clean way to stop hive metastore server.
+            serverThread.stop();
+        }
+    }
+
+    private void initLocalMetastoreConf() throws IOException {
+        hiveConf = new HiveConf(hadoopConf, this.getClass());
+        hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new File("target/warehouse").getAbsolutePath());
+        hiveConf.set("hive.metastore.local", "true"); // For hive 0.9
+        hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:target/metastore_db;create=true");
+
+        setSystemProperty(HiveConf.ConfVars.METASTORE_MODE.varname, "true");
+        setSystemProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new File("target/warehouse").getAbsolutePath());
+        setSystemProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
+                "jdbc:derby:target/metastore_db;create=true");
+        File derbyLogFile = new File("target/derby.log");
+        derbyLogFile.createNewFile();
+        setSystemProperty("derby.stream.error.file", derbyLogFile.getPath());
+    }
+
+    private void initMetastoreServerConf() throws Exception {
+
+        hiveConf = new HiveConf(hadoopConf, this.getClass());
+        hiveConf.set("hive.metastore.local", "false"); // For hive 0.9
+        hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+    }
+
+    private void startMetastoreServer() throws Exception {
+        final HiveConf serverConf = new HiveConf(hadoopConf, this.getClass());
+        serverConf.set("hive.metastore.local", "false");
+        serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:target/metastore_db;create=true");
+        //serverConf.set(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname, NotificationListener.class.getName());
+        File derbyLogFile = new File("target/derby.log");
+        derbyLogFile.createNewFile();
+        setSystemProperty("derby.stream.error.file", derbyLogFile.getPath());
+        serverThread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    HiveMetaStore.startMetaStore(msPort, ShimLoader.getHadoopThriftAuthBridge(), serverConf);
+                    LOG.info("Started metastore server on port " + msPort);
+                }
+                catch (Throwable e) {
+                    LOG.error("Metastore Thrift Server threw an exception...", e);
+                }
+            }
+        });
+        serverThread.setDaemon(true);
+        serverThread.start();
+        Thread.sleep(10000L);
+    }
+
+    public static void resetDefaultDBCreation() throws Exception {
+        // Need to do this, else default db will not be created for local metastores.
+        // TestHiveMain will fail with InvalidObjectException(message:There is no database named default)
+        Field declaredField = HMSHandler.class.getDeclaredField("createDefaultDB");
+        declaredField.setAccessible(true);
+        declaredField.set(null, false);
+    }
+
+    public static void resetHiveConfStaticVariables() throws Exception {
+        // HiveConf initializes location of hive-site.xml in static block.
+        // So this is needed so that tests like TestHiveMain that create hive-site.xml don't fail.
+        Field declaredField = HiveConf.class.getDeclaredField("hiveSiteURL");
+        declaredField.setAccessible(true);
+        declaredField.set(null, HiveConf.class.getClassLoader().getResource("hive-site.xml"));
+    }
+
+    private void setSystemProperty(String name, String value) {
+        if (!sysProps.containsKey(name)) {
+            String currentValue = System.getProperty(name);
+            sysProps.put(name, currentValue);
+        }
+        if (value != null) {
+            System.setProperty(name, value);
+        }
+        else {
+            System.getProperties().remove(name);
+        }
+    }
+
+    private void resetSystemProperties() {
+        for (Map.Entry<String, String> entry : sysProps.entrySet()) {
+            if (entry.getValue() != null) {
+                System.setProperty(entry.getKey(), entry.getValue());
+            }
+            else {
+                System.getProperties().remove(entry.getKey());
+            }
+        }
+        sysProps.clear();
+    }
+
+    public Configuration getMetaStoreConf() {
+        return hiveConf;
+    }
+
+    public String getMetastoreAuthority() {
+        if (mode.equals(RUNMODE.SERVER)) {
+            return "localhost:" + msPort;
+        }
+        else {
+            return "unittest-local";
+        }
+    }
+
+    public String getMetastoreURI() {
+        return hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname);
+    }
+
+    public HCatClient getHCatClient() {
+        return hcatClient;
+    }
+
+    public URI getHCatURI(String db, String table, String partitions) throws URISyntaxException {
+        StringBuilder uri = new StringBuilder();
+        uri.append("hcat://").append(getMetastoreAuthority()).append("/").append(db).append("/").append(table)
+                .append("/").append(partitions);
+        return new URI(uri.toString());
+    }
+
+    public void createDatabase(String db, String location) throws Exception {
+        HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(db).ifNotExists(true).location(location).build();
+        hcatClient.createDatabase(dbDesc);
+        List<String> dbNames = hcatClient.listDatabaseNamesByPattern(db);
+        Assert.assertTrue(dbNames.contains(db));
+    }
+
+    public void createTable(String db, String table, String partitionCols) throws Exception {
+        List<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
+        cols.add(new HCatFieldSchema("userid", Type.INT, "userid"));
+        cols.add(new HCatFieldSchema("viewtime", Type.BIGINT, "view time"));
+        cols.add(new HCatFieldSchema("pageurl", Type.STRING, "page url visited"));
+        cols.add(new HCatFieldSchema("ip", Type.STRING, "IP Address of the User"));
+        ArrayList<HCatFieldSchema> ptnCols = new ArrayList<HCatFieldSchema>();
+        for (String partitionCol : partitionCols.split(",")) {
+            ptnCols.add(new HCatFieldSchema(partitionCol, Type.STRING, null));
+        }
+        // Remove this once NotificationListener is fixed and available in HCat snapshot
+        Map<String, String> tblProps = new HashMap<String, String>();
+        tblProps.put(HCatConstants.HCAT_MSGBUS_TOPIC_NAME, "hcat." + db + "." + table);
+        HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(db, table, cols).fileFormat("textfile")
+                .partCols(ptnCols).tblProps(tblProps).build();
+        hcatClient.createTable(tableDesc);
+        List<String> tables = hcatClient.listTableNamesByPattern(db, "*");
+        assertTrue(tables.contains(table));
+    }
+
+    public void dropDatabase(String db, boolean ifExists) throws Exception {
+        hcatClient.dropDatabase(db, ifExists, DropDBMode.CASCADE);
+        List<String> dbNames = hcatClient.listDatabaseNamesByPattern(db);
+        assertFalse(dbNames.contains(db));
+    }
+
+    public void dropTable(String db, String table, boolean ifExists) throws Exception {
+        hcatClient.dropTable(db, table, ifExists);
+        List<String> tables = hcatClient.listTableNamesByPattern(db, "*");
+        assertFalse(tables.contains(table));
+    }
+
+    public String getPartitionDir(String db, String table, String partitionSpec, String dbLocation) throws Exception {
+        String dir = dbLocation + "/" + db + "/" + table + "/"
+                + partitionSpec.replaceAll(HCatURI.PARTITION_SEPARATOR, "/");
+        return dir;
+    }
+
+    public String createPartitionDir(String db, String table, String partitionSpec, String dbLocation) throws Exception {
+        String dir = getPartitionDir(db, table, partitionSpec, dbLocation);
+        FileSystem.get(hadoopConf).mkdirs(new Path(dir));
+        return dir;
+    }
+
+    public void addPartition(String db, String table, String partitionSpec, String location) throws Exception {
+        String[] parts = partitionSpec.split(HCatURI.PARTITION_SEPARATOR);
+        Map<String, String> partitions = new HashMap<String, String>();
+        for (String part : parts) {
+            String[] split = part.split("=");
+            partitions.put(split[0], split[1]);
+        }
+        HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(db, table, location, partitions).build();
+        hcatClient.addPartition(addPtn);
+        assertNotNull(hcatClient.getPartition(db, table, partitions));
+    }
+
+    public void dropPartition(String db, String table, String partitionSpec) throws Exception {
+        String[] parts = partitionSpec.split(HCatURI.PARTITION_SEPARATOR);
+        Map<String, String> partitions = new HashMap<String, String>();
+        for (String part : parts) {
+            String[] split = part.split("=");
+            partitions.put(split[0], split[1]);
+        }
+        hcatClient.dropPartitions(db, table, partitions, false);
+    }
+
+    public List<HCatPartition> getPartitions(String db, String table, String partitionSpec) throws Exception {
+        String[] parts = partitionSpec.split(HCatURI.PARTITION_SEPARATOR);
+        Map<String, String> partitions = new HashMap<String, String>();
+        for (String part : parts) {
+            String[] split = part.split("=");
+            partitions.put(split[0], split[1]);
+        }
+        return hcatClient.getPartitions(db, table, partitions);
+    }
+
+}
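
For orientation, a minimal sketch of driving the new MiniHCatServer directly; the database, table and filesystem locations below are illustrative, not part of the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.oozie.test.MiniHCatServer;
    import org.apache.oozie.test.MiniHCatServer.RUNMODE;

    public class MiniHCatServerSketch {
        public static void main(String[] args) throws Exception {
            // SERVER mode starts a Thrift metastore on a random port in
            // [30000, 30100); LOCAL mode runs an in-process metastore on Derby.
            MiniHCatServer hcat = new MiniHCatServer(RUNMODE.SERVER, new Configuration());
            hcat.start();
            try {
                hcat.createDatabase("testdb", "/tmp/hcat-test/testdb");
                hcat.createTable("testdb", "clicks", "dt,region");
                hcat.addPartition("testdb", "clicks", "dt=20120430;region=us",
                        "/tmp/hcat-test/testdb/clicks/dt=20120430/region=us");
                // Prints hcat://localhost:<port>/testdb/clicks/dt=20120430;region=us
                System.out.println(hcat.getHCatURI("testdb", "clicks", "dt=20120430;region=us"));
            }
            finally {
                hcat.shutdown();
            }
        }
    }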

Modified: oozie/trunk/core/src/test/java/org/apache/oozie/test/XDataTestCase.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/test/XDataTestCase.java?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/test/XDataTestCase.java (original)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/test/XDataTestCase.java Mon Feb 25 21:42:07 2013
@@ -80,7 +80,7 @@ import org.apache.oozie.workflow.lite.St
 import org.jdom.Element;
 import org.jdom.JDOMException;
 
-public abstract class XDataTestCase extends XFsTestCase {
+public abstract class XDataTestCase extends XHCatTestCase {
 
     protected static String slaXml = " <sla:info xmlns:sla='uri:oozie:sla:0.1'>"
             + " <sla:app-name>test-app</sla:app-name>" + " <sla:nominal-time>2009-03-06T10:00Z</sla:nominal-time>"
@@ -578,6 +578,7 @@ public abstract class XDataTestCase exte
             throw new IOException(e);
         }
         action.setLastModifiedTime(new Date());
+        action.setCreatedTime(new Date());
         action.setStatus(status);
         action.setActionXml(actionXml);
 
@@ -1198,6 +1199,18 @@ public abstract class XDataTestCase exte
         conf.set(Services.CONF_SERVICE_CLASSES, new String(builder));
     }
 
+    /**
+     * Add a particular service class to be run in addition to the default ones
+     * @param conf configuration to update
+     * @param serviceName fully qualified class name of the service to add
+     */
+    protected void addServiceToRun(Configuration conf, String serviceName) {
+        String classes = conf.get(Services.CONF_SERVICE_CLASSES);
+        StringBuilder builder = new StringBuilder(classes);
+        builder.append(",").append(serviceName);
+        conf.set(Services.CONF_SERVICE_CLASSES, builder.toString());
+    }
+
     /**
      * Adds the db records for the Bulk Monitor tests
      */
@@ -1270,4 +1283,65 @@ public abstract class XDataTestCase exte
         return DateUtils.formatDateOozieTZ(currentDate);
     }
 
+    protected String addInitRecords(String pushMissingDependencies) throws Exception {
+        CoordinatorJobBean job = addRecordToCoordJobTableForWaiting("coord-job-for-action-input-check.xml",
+                CoordinatorJob.Status.RUNNING, false, true);
+
+        CoordinatorActionBean action1 = addRecordToCoordActionTableForWaiting(job.getId(), 1,
+                CoordinatorAction.Status.WAITING, "coord-action-for-action-input-check.xml", pushMissingDependencies);
+        return action1.getId();
+    }
+
+    protected CoordinatorActionBean addRecordToCoordActionTableForWaiting(String jobId, int actionNum,
+            CoordinatorAction.Status status, String resourceXmlName, String pushMissingDependencies) throws Exception {
+        CoordinatorActionBean action = createCoordAction(jobId, actionNum, status, resourceXmlName, 0);
+        action.setPushMissingDependencies(pushMissingDependencies);
+        try {
+            JPAService jpaService = Services.get().get(JPAService.class);
+            assertNotNull(jpaService);
+            CoordActionInsertJPAExecutor coordActionInsertCmd = new CoordActionInsertJPAExecutor(action);
+            jpaService.execute(coordActionInsertCmd);
+        }
+        catch (JPAExecutorException je) {
+            je.printStackTrace();
+            fail("Unable to insert the test coord action record to table");
+            throw je;
+        }
+        return action;
+    }
+
+    protected CoordinatorJobBean addRecordToCoordJobTableForWaiting(String testFileName, CoordinatorJob.Status status,
+             boolean pending, boolean doneMatd) throws Exception {
+
+        String testDir = getTestCaseDir();
+        CoordinatorJobBean coordJob = createCoordJob(status, pending, doneMatd);
+        String appXml = getCoordJobXmlForWaiting(testFileName, testDir);
+        coordJob.setJobXml(appXml);
+
+        try {
+            JPAService jpaService = Services.get().get(JPAService.class);
+            assertNotNull(jpaService);
+            CoordJobInsertJPAExecutor coordInsertCmd = new CoordJobInsertJPAExecutor(coordJob);
+            jpaService.execute(coordInsertCmd);
+        }
+        catch (JPAExecutorException je) {
+            je.printStackTrace();
+            fail("Unable to insert the test coord job record to table");
+            throw je;
+        }
+
+        return coordJob;
+    }
+
+    protected String getCoordJobXmlForWaiting(String testFileName, String testDir) {
+        try {
+            Reader reader = IOUtils.getResourceAsReader(testFileName, -1);
+            String appXml = IOUtils.getReaderAsString(reader, -1);
+            appXml = appXml.replaceAll("#testDir", testDir);
+            return appXml;
+        }
+        catch (IOException ioe) {
+            throw new RuntimeException(XLog.format("Could not get " + testFileName, ioe));
+        }
+    }
 }

Added: oozie/trunk/core/src/test/java/org/apache/oozie/test/XHCatTestCase.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/test/XHCatTestCase.java?rev=1449911&view=auto
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/test/XHCatTestCase.java (added)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/test/XHCatTestCase.java Mon Feb 25 21:42:07 2013
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.oozie.test;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hcatalog.api.HCatPartition;
+import org.apache.oozie.util.HCatURI;
+
+/**
+ * Base JUnit <code>TestCase</code> subclass used by all Oozie testcases that
+ * need Hadoop FS access and HCat access.
+ */
+public abstract class XHCatTestCase extends XFsTestCase {
+
+    private MiniHCatServer hcatServer;
+
+    @Override
+    protected void setUp() throws Exception {
+        super.setUp();
+        hcatServer = super.getHCatalogServer();
+    }
+
+    @Override
+    protected void tearDown() throws Exception {
+        super.tearDown();
+    }
+
+    protected Configuration getMetaStoreConf() {
+        return hcatServer.getMetaStoreConf();
+    }
+
+    public String getMetastoreAuthority() {
+        return hcatServer.getMetastoreAuthority();
+    }
+
+    protected URI getHCatURI(String db, String table, String partitions) throws URISyntaxException {
+        return hcatServer.getHCatURI(db, table, partitions);
+    }
+
+    protected void createDatabase(String db) throws Exception {
+        if (db.equals("default"))
+            return;
+        hcatServer.createDatabase(db, getTestCaseDir());
+    }
+
+    protected void createTable(String db, String table, String partitionCols) throws Exception {
+        hcatServer.createTable(db, table, partitionCols);
+    }
+
+    protected void dropDatabase(String db, boolean ifExists) throws Exception {
+        if (db.equals("default"))
+            return;
+        hcatServer.dropDatabase(db, ifExists);
+    }
+
+    protected void dropTable(String db, String table, boolean ifExists) throws Exception {
+        hcatServer.dropTable(db, table, ifExists);
+    }
+
+    protected String getPartitionDir(String db, String table, String partitionSpec) throws Exception {
+        return hcatServer.getPartitionDir(db, table, partitionSpec, getTestCaseDir());
+    }
+
+    /**
+     * Add a partition to the table
+     * @param db database name
+     * @param table table name
+     * @param partitionSpec partition key-value pairs separated by ';', e.g. year=2011;country=usa
+     * @return location of the partition directory created under the test case dir
+     * @throws Exception
+     */
+    protected String addPartition(String db, String table, String partitionSpec) throws Exception {
+        String location = hcatServer.createPartitionDir(db, table, partitionSpec, getTestCaseDir());
+        hcatServer.addPartition(db, table, partitionSpec, location);
+        return location;
+    }
+
+    protected void dropPartition(String db, String table, String partitionSpec) throws Exception {
+        hcatServer.dropPartition(db, table, partitionSpec);
+    }
+
+    public List<HCatPartition> getPartitions(String db, String table, String partitionSpec) throws Exception {
+        return hcatServer.getPartitions(db, table, partitionSpec);
+    }
+
+    protected Map<String, String> getPartitionMap(String partitionSpec) {
+        String[] parts = partitionSpec.split(HCatURI.PARTITION_SEPARATOR);
+        Map<String, String> partitions = new HashMap<String, String>();
+        for (String part : parts) {
+            String[] split = part.split("=");
+            partitions.put(split[0], split[1]);
+        }
+        return partitions;
+    }
+
+}
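
A sketch of a test built on the new base class, assuming only the helpers committed above; the class and database/table names are illustrative:

    import java.net.URI;

    import org.apache.oozie.test.XHCatTestCase;

    public class MyHCatHelpersTest extends XHCatTestCase {

        public void testPartitionHelpers() throws Exception {
            createDatabase("mydb");
            createTable("mydb", "clicks", "dt,region");

            // Creates the partition directory under the test case dir and
            // registers the partition with the embedded metastore.
            String location = addPartition("mydb", "clicks", "dt=20120430;region=us");
            assertNotNull(location);

            // Builds the hcat:// URI a coordinator dependency would reference.
            URI dep = getHCatURI("mydb", "clicks", "dt=20120430;region=us");
            assertNotNull(dep);
            assertEquals(1, getPartitions("mydb", "clicks", "dt=20120430;region=us").size());

            dropPartition("mydb", "clicks", "dt=20120430;region=us");
            dropTable("mydb", "clicks", true);
            dropDatabase("mydb", true);
        }
    }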

Modified: oozie/trunk/core/src/test/java/org/apache/oozie/test/XTestCase.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/test/XTestCase.java?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/test/XTestCase.java (original)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/test/XTestCase.java Mon Feb 25 21:42:07 2013
@@ -53,11 +53,19 @@ import org.apache.oozie.CoordinatorJobBe
 import org.apache.oozie.SLAEventBean;
 import org.apache.oozie.WorkflowActionBean;
 import org.apache.oozie.WorkflowJobBean;
+import org.apache.oozie.dependency.FSURIHandler;
+import org.apache.oozie.dependency.HCatURIHandler;
 import org.apache.oozie.service.ConfigurationService;
+import org.apache.oozie.service.HCatAccessorService;
 import org.apache.oozie.service.HadoopAccessorService;
+import org.apache.oozie.service.JMSAccessorService;
+import org.apache.oozie.service.PartitionDependencyManagerService;
+import org.apache.oozie.service.ServiceException;
 import org.apache.oozie.service.Services;
+import org.apache.oozie.service.URIHandlerService;
 import org.apache.oozie.store.CoordinatorStore;
 import org.apache.oozie.store.StoreException;
+import org.apache.oozie.test.MiniHCatServer.RUNMODE;
 import org.apache.oozie.util.IOUtils;
 import org.apache.oozie.util.ParamChecker;
 import org.apache.oozie.util.XLog;
@@ -89,6 +97,8 @@ public abstract class XTestCase extends 
     private static final String OOZIE_TEST_PROPERTIES = "oozie.test.properties";
 
     public static float WAITFOR_RATIO = Float.parseFloat(System.getProperty("oozie.test.waitfor.ratio", "1"));
+    protected static final String localActiveMQBroker = "vm://localhost?broker.persistent=false";
+    protected static final String ActiveMQConnFactory = "org.apache.activemq.jndi.ActiveMQInitialContextFactory";
 
     static {
         try {
@@ -304,6 +314,10 @@ public abstract class XTestCase extends 
             conf.writeXml(os);
             os.close();
         }
+
+        if (System.getProperty("oozie.test.metastore.server", "true").equals("true")) {
+            setupHCatalogServer();
+        }
     }
 
     /**
@@ -601,6 +615,10 @@ public abstract class XTestCase extends 
                                   getOozieUser() + "/localhost") + "@" + getRealm();
     }
 
+    protected MiniHCatServer getHCatalogServer() {
+        return hcatServer;
+    }
+
     //TODO Fix this
     /**
      * Clean up database schema
@@ -684,6 +702,7 @@ public abstract class XTestCase extends 
 
     private static MiniDFSCluster dfsCluster = null;
     private static MiniMRCluster mrCluster = null;
+    private static MiniHCatServer hcatServer = null;
 
     private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
         if (dfsCluster == null && mrCluster == null) {
@@ -759,6 +778,14 @@ public abstract class XTestCase extends 
         }
     }
 
+    private void setupHCatalogServer() throws Exception {
+        if (hcatServer == null) {
+            hcatServer = new MiniHCatServer(RUNMODE.SERVER, createJobConf());
+            hcatServer.start();
+            log.info("Metastore server started at " + hcatServer.getMetastoreURI());
+        }
+    }
+
     private static void shutdownMiniCluster() {
         try {
             if (mrCluster != null) {
@@ -857,5 +884,24 @@ public abstract class XTestCase extends 
             throw new RuntimeException(ex);
         }
     }
+
+    protected Services setupServicesForHCatalog() throws ServiceException {
+        Services services = new Services();
+        Configuration conf = services.getConf();
+        conf.set(Services.CONF_SERVICE_EXT_CLASSES,
+                JMSAccessorService.class.getName() + "," +
+                PartitionDependencyManagerService.class.getName() + "," +
+                HCatAccessorService.class.getName());
+        conf.set(HCatAccessorService.JMS_CONNECTIONS_PROPERTIES,
+                "default=java.naming.factory.initial#" + ActiveMQConnFactory + ";" +
+                "java.naming.provider.url#" + localActiveMQBroker + ";" +
+                "connectionFactoryNames#" + "ConnectionFactory");
+        conf.set(URIHandlerService.URI_HANDLERS,
+                FSURIHandler.class.getName() + "," + HCatURIHandler.class.getName());
+        setSystemProperty("java.naming.factory.initial", ActiveMQConnFactory);
+        setSystemProperty("java.naming.provider.url", localActiveMQBroker);
+        return services;
+    }
+
 }
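
A companion sketch of consuming the new setupServicesForHCatalog() helper from a test, mirroring its use in TestRecoveryService above; the test class itself is illustrative:

    import org.apache.oozie.service.HCatAccessorService;
    import org.apache.oozie.service.JMSAccessorService;
    import org.apache.oozie.service.Services;
    import org.apache.oozie.test.XTestCase;

    public class MyHCatServicesTest extends XTestCase {

        public void testHCatServicesWiredUp() throws Exception {
            // Adds JMSAccessorService, PartitionDependencyManagerService and
            // HCatAccessorService on top of the default services, wired to the
            // embedded ActiveMQ broker and the FS/HCat URI handlers.
            Services services = setupServicesForHCatalog();
            services.init();
            try {
                assertNotNull(services.get(HCatAccessorService.class));
                assertNotNull(services.get(JMSAccessorService.class));
            }
            finally {
                services.destroy();
            }
        }
    }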
 

Modified: oozie/trunk/core/src/test/java/org/apache/oozie/util/TestGraphGenerator.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/util/TestGraphGenerator.java?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/util/TestGraphGenerator.java (original)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/util/TestGraphGenerator.java Mon Feb 25 21:42:07 2013
@@ -109,6 +109,7 @@ public class TestGraphGenerator extends 
 
     private static String readFile(String path) throws IOException {
         File f = new File(path);
+        System.out.println("Reading input file " + f.getAbsolutePath());
         FileInputStream stream = new FileInputStream(f);
         try {
             FileChannel fc = stream.getChannel();

Added: oozie/trunk/core/src/test/java/org/apache/oozie/util/TestHCatURI.java
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/java/org/apache/oozie/util/TestHCatURI.java?rev=1449911&view=auto
==============================================================================
--- oozie/trunk/core/src/test/java/org/apache/oozie/util/TestHCatURI.java (added)
+++ oozie/trunk/core/src/test/java/org/apache/oozie/util/TestHCatURI.java Mon Feb 25 21:42:07 2013
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.oozie.util;
+
+import static org.junit.Assert.*;
+import java.net.URISyntaxException;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.junit.Test;
+import org.apache.oozie.util.HCatURI;
+
+public class TestHCatURI {
+
+    @Test
+    public void testHCatURIParseValidURI() {
+        String input = "hcat://hcat.server.com:5080/mydb/clicks/datastamp=12;region=us";
+        HCatURI uri = null;
+        try {
+            uri = new HCatURI(input);
+        }
+        catch (Exception ex) {
+            fail(ex.getMessage());
+        }
+        assertEquals(uri.getServerEndPoint(), "hcat://hcat.server.com:5080");
+        assertEquals(uri.getDb(), "mydb");
+        assertEquals(uri.getTable(), "clicks");
+        assertEquals(uri.getPartitionValue("datastamp"), "12");
+        assertEquals(uri.getPartitionValue("region"), "us");
+
+    }
+
+    @Test(expected = URISyntaxException.class)
+    public void testHCatURIParseInvalidURI() throws Exception {
+        String input = "hcat://hcat.server.com:5080/mydb/clicks/datastamp=12;region=us/invalid";
+        new HCatURI(input);
+    }
+
+    @Test(expected = URISyntaxException.class)
+    public void testHCatURIParseInvalidPartition() throws Exception {
+        String input = "hcat://hcat.server.com:5080/mydb/clicks/datastamp";
+        new HCatURI(input);
+    }
+
+    @Test(expected = URISyntaxException.class)
+    public void testHCatURIParseServerMissing() throws Exception {
+        String input = "hcat:///mydb/clicks/datastamp=12;region=us";
+        new HCatURI(input);
+    }
+
+    @Test(expected = URISyntaxException.class)
+    public void testHCatURIParseDBMissing() throws Exception {
+        String input = "hcat://hcat.server.com:5080//clicks/datastamp=12;region=us";
+        new HCatURI(input);
+    }
+
+    @Test(expected = URISyntaxException.class)
+    public void testHCatURIParseTableMissing() throws Exception {
+        String input = "hcat://hcat.server.com:5080/mydb//datastamp=12;region=us";
+        new HCatURI(input);
+    }
+
+    @Test
+    public void testGetHCatUri() {
+        Map<String, String> partitions = new LinkedHashMap<String, String>();
+        partitions.put("datastamp", "12");
+        partitions.put("region", "us");
+        String hcatUri = HCatURI.getHCatURI("hcat", "hcat.server.com:5080", "mydb", "clicks", partitions);
+
+        assertEquals("hcat://hcat.server.com:5080/mydb/clicks/datastamp=12;region=us", hcatUri);
+    }
+
+    @Test
+    public void testEqualsPositive() {
+        HCatURI uri1 = null;
+        HCatURI uri2 = null;
+        try {
+            uri1 = new HCatURI("hcat://hcat.server.com:5080/mydb/clicks/datastamp=12;region=us;timestamp=1201");
+            uri2 = new HCatURI("hcat://hcat.server.com:5080/mydb/clicks/datastamp=12;region=us;timestamp=1201");
+        }
+        catch (URISyntaxException e) {
+            fail(e.getMessage());
+        }
+
+        assertEquals(uri1, uri2);
+    }
+
+    @Test
+    public void testEqualsNegative() {
+        HCatURI uri1 = null;
+        HCatURI uri2 = null;
+        HCatURI uri3 = null;
+        HCatURI uri4 = null;
+        HCatURI uri5 = null;
+        try {
+            uri1 = new HCatURI("hcat://hcat.server.com:5080/mydb/clicks/datastamp=12;region=us;timestamp=1201");
+            uri2 = new HCatURI("hcat://hcat.server.com:5080/mydb2/clicks/region=us;timestamp=1201;datastamp=12");
+            uri3 = new HCatURI("hcat://hcat.server.com:5080/mydb/clicks2/region=us;timestamp=1201;datastamp=12");
+            uri4 = new HCatURI("hcat://hcat.server.com:5080/mydb/clicks/region=uk;timestamp=1201;datastamp=12");
+            uri5 = new HCatURI("hcat://hcat.server.com:5080/mydb/clicks/region=us;timestamp=1201");
+        }
+        catch (URISyntaxException e) {
+            fail(e.getMessage());
+        }
+        assertFalse(uri1.equals(uri2));
+        assertFalse(uri2.equals(uri1));
+        assertFalse(uri1.equals(uri3));
+        assertFalse(uri3.equals(uri1));
+        assertFalse(uri1.equals(uri4));
+        assertFalse(uri4.equals(uri1));
+        assertFalse(uri1.equals(uri5));
+        assertFalse(uri5.equals(uri1));
+    }
+
+    @Test
+    public void testToFilter() {
+        String hcatURI = "hcat://hcat.server.com:5080/mydb/clicks/datastamp=20120230;region=us";
+        String filter = "";
+        try {
+            filter = new HCatURI(hcatURI).toPartitionFilter("java");
+        }
+        catch (URISyntaxException e) {
+            fail(e.getMessage());
+        }
+        assertTrue(filter.equals("(datastamp='20120230' AND region='us')")
+                || filter.equals("(region='us' AND datastamp='20120230')"));
+    }
+}

Added: oozie/trunk/core/src/test/resources/coord-job-for-matd-hcat.xml
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/resources/coord-job-for-matd-hcat.xml?rev=1449911&view=auto
==============================================================================
--- oozie/trunk/core/src/test/resources/coord-job-for-matd-hcat.xml (added)
+++ oozie/trunk/core/src/test/resources/coord-job-for-matd-hcat.xml Mon Feb 25 21:42:07 2013
@@ -0,0 +1,76 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<coordinator-app xmlns='uri:oozie:coordinator:0.2' name='NAME'
+frequency="1" start='2009-02-01T01:00Z' end='2009-02-03T23:59Z'
+timezone='UTC' freq_timeunit='DAY' end_of_duration='NONE'>
+    <controls>
+        <timeout>10</timeout>
+        <concurrency>2</concurrency>
+        <execution>LIFO</execution>
+    </controls>
+    <input-events>
+        <data-in name='A' dataset='a'>
+        <dataset name='a' frequency='7' initial-instance='2009-01-01T01:00Z'
+            timezone='UTC' freq_timeunit='DAY' end_of_duration='NONE'>
+            <uri-template>hcat://dummyhcat:1000/db1/table1/ds=${YEAR}-${DAY}
+            </uri-template>
+        </dataset>
+        <instance>${coord:current(-3)}</instance>
+        </data-in>
+
+        <data-in name='B' dataset='b'>
+        <dataset name='b' frequency='7' initial-instance='2009-01-01T01:00Z'
+            timezone='UTC' freq_timeunit='DAY' end_of_duration='NONE'>
+            <uri-template>hcat://dummyhcat:1000/db2/table2/ds=${YEAR}-${DAY}
+            </uri-template>
+        </dataset>
+        <start-instance>${coord:latest(-1)}</start-instance>
+        <end-instance>${coord:latest(0)}</end-instance>
+        </data-in>
+        <data-in name='C' dataset='c'>
+        <dataset name='c' frequency='7' initial-instance='2009-01-01T01:00Z'
+            timezone='UTC' freq_timeunit='DAY' end_of_duration='NONE'>
+            <uri-template>hcat://dummyhcat:1000/db3/table3/ds=${YEAR}-${DAY}
+            </uri-template>
+        </dataset>
+        <start-instance>${coord:current(-1)}</start-instance>
+        <end-instance>${coord:current(0)}</end-instance>
+        </data-in>
+
+        <data-in name='D' dataset='d'>
+        <dataset name='d' frequency='7' initial-instance='2009-01-01T01:00Z'
+            timezone='UTC' freq_timeunit='DAY' end_of_duration='NONE'>
+            <uri-template>file://dummyhdfs/${YEAR}/${DAY}
+            </uri-template>
+        </dataset>
+        <instance>${coord:current(0)}</instance>
+        </data-in>
+        </input-events>
+
+        <action>
+            <workflow>
+                <app-path>hdfs:///tmp/workflows/</app-path>
+                <configuration>
+                    <property>
+                        <name>inputA</name>
+                        <value>${coord:dataIn('A')}</value>
+                    </property>
+                </configuration>
+            </workflow>
+        </action>
+</coordinator-app>

Added: oozie/trunk/core/src/test/resources/coord-job-for-matd-neg-hcat.xml
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/resources/coord-job-for-matd-neg-hcat.xml?rev=1449911&view=auto
==============================================================================
--- oozie/trunk/core/src/test/resources/coord-job-for-matd-neg-hcat.xml (added)
+++ oozie/trunk/core/src/test/resources/coord-job-for-matd-neg-hcat.xml Mon Feb 25 21:42:07 2013
@@ -0,0 +1,47 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<coordinator-app xmlns='uri:oozie:coordinator:0.2' name='NAME'
+    frequency="1" start='2009-02-01T01:00Z' end='2009-02-03T23:59Z'
+    timezone='UTC' freq_timeunit='DAY' end_of_duration='NONE'>
+    <controls>
+        <timeout>10</timeout>
+        <concurrency>2</concurrency>
+        <execution>LIFO</execution>
+    </controls>
+    <input-events>
+    <data-in name='A' dataset='a'>
+        <dataset name='a' frequency='7' initial-instance='2009-01-01T01:00Z'
+         timezone='UTC' freq_timeunit='DAY' end_of_duration='NONE'>
+        <uri-template>hcat://dummyhcat:1000/table/ds=${YEAR}/${DAY};region=us
+        </uri-template>
+        </dataset>
+        <instance>${coord:current(-3)}</instance>
+    </data-in>
+    </input-events>
+    <action>
+        <workflow>
+        <app-path>hdfs:///tmp/workflows/</app-path>
+        <configuration>
+            <property>
+            <name>inputA</name>
+            <value>${coord:dataIn('A')}</value>
+            </property>
+        </configuration>
+        </workflow>
+     </action>
+</coordinator-app>

Added: oozie/trunk/core/src/test/resources/ehcache.xml
URL: http://svn.apache.org/viewvc/oozie/trunk/core/src/test/resources/ehcache.xml?rev=1449911&view=auto
==============================================================================
--- oozie/trunk/core/src/test/resources/ehcache.xml (added)
+++ oozie/trunk/core/src/test/resources/ehcache.xml Mon Feb 25 21:42:07 2013
@@ -0,0 +1,39 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<ehcache>
+    <!--  http://svn.codehaus.org/sonar/tags/1.1/sonar-commons/src/main/resources/ehcache.xml -->
+    <diskStore path="target"/>
+    <cache name="testspilltodisk" maxElementsInMemory="20000" eternal="false"
+        overflowToDisk="true" timeToIdleSeconds="500" timeToLiveSeconds="0"
+        diskPersistent="false" diskExpiryThreadIntervalSeconds="120" />
+    <cache name="testnospilltodisk" maxElementsInMemory="0" eternal="false"
+        overflowToDisk="false" timeToIdleSeconds="500" timeToLiveSeconds="0"
+        diskPersistent="false" diskExpiryThreadIntervalSeconds="120" />
+    <!--  Note: overflowToDisk=true does not work well with timeToIdleSeconds or timeToLiveSeconds.
+          Returns null even for unexpired entries. If maxElementsInMemory="5" and overflowToDisk="true",
+          the testevictionontimetoidle and testevictionontimetolive tests will fail -->
+    <cache name="testevictionontimetoidle" maxElementsInMemory="0" eternal="false"
+        overflowToDisk="false" timeToIdleSeconds="1" timeToLiveSeconds="0"
+        diskPersistent="false" diskExpiryThreadIntervalSeconds="120" />
+    <cache name="testevictionontimetolive" maxElementsInMemory="0" eternal="false"
+        overflowToDisk="false" timeToIdleSeconds="0" timeToLiveSeconds="1"
+        diskPersistent="false" diskExpiryThreadIntervalSeconds="120" />
+    <cache name="testmaxelementsinmemory" maxElementsInMemory="500" eternal="false"
+        overflowToDisk="false" timeToIdleSeconds="0" timeToLiveSeconds="0"
+        diskPersistent="false" diskExpiryThreadIntervalSeconds="120" />
+</ehcache>

Modified: oozie/trunk/docs/src/site/twiki/CoordinatorFunctionalSpec.twiki
URL: http://svn.apache.org/viewvc/oozie/trunk/docs/src/site/twiki/CoordinatorFunctionalSpec.twiki?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/docs/src/site/twiki/CoordinatorFunctionalSpec.twiki (original)
+++ oozie/trunk/docs/src/site/twiki/CoordinatorFunctionalSpec.twiki Mon Feb 25 21:42:07 2013
@@ -12,6 +12,10 @@ The goal of this document is to define a
 
 ---++ Changelog
 
+---+++!! 07/JAN/2013
+
+   * #6.8 Added section on new EL functions for datasets defined with HCatalog
+
 ---+++!! 26/JUL/2012
 
    * #Appendix A, updated XML schema 0.4 to include =parameters= element
@@ -93,11 +97,16 @@ This document defines the functional spe
 
 *Actual time:* The actual time indicates the time when something actually happens.
 
-*Nominal time:* The nominal time specifies the time when something should happen. In theory the nominal time and the actual time should mach, however, in practice due to delays the actual time may occur later than the nominal time.
+*Nominal time:* The nominal time specifies the time when something should happen. In theory the nominal time and the actual time should match, however, in practice due to delays the actual time may occur later than the nominal time.
 
-*Dataset:* Collection of data referred to by a logical name. A dataset normally has several instances of data and each one of them can be referred individually. Each dataset instance is represented by a unique set of URIs.
+*Dataset:* Collection of data referred to by a logical name. A dataset normally has several instances of data and each
+one of them can be referred to individually. Each dataset instance is represented by a unique set of URIs.
 
-*Synchronous Dataset:* Synchronous datasets instances are generated at fixed time intervals and there is a dataset instance associated with each time interval. Synchronous dataset instances are identified by their nominal time. For example, in the case of a file system based dataset, the nominal time would be somewhere in the file path of the dataset instance: =hdfs://foo:8020/usr/logs/2009/04/15/23/30= .
+*Synchronous Dataset:* Synchronous dataset instances are generated at fixed time intervals and there is a dataset
+instance associated with each time interval. Synchronous dataset instances are identified by their nominal time.
+For example, in the case of a HDFS based dataset, the nominal time would be somewhere in the file path of the
+dataset instance: hdfs://foo:8020/usr/logs/2009/04/15/23/30. In the case of HCatalog table partitions, the nominal time
+would be part of some partition values: hcat://bar:8020/mydb/mytable/year=2009;month=04;dt=15;region=us.
 
 *Coordinator Action:* A coordinator action is a workflow job that is started when a set of conditions are met (input dataset instances are available).
 
@@ -572,11 +581,29 @@ The dataset resolves to the following UR
   ...
 </verbatim>
 
+---+++ 5.2. Dataset URI-Template types
+
+Each dataset URI could be a HDFS path URI denoting a HDFS directory: hdfs://foo:8020/usr/logs/20090415 or a
+HCatalog partition URI identifying a set of table partitions: hcat://bar:8020/logsDB/logsTable/dt=20090415;region=US.
 
----+++ 5.2. Asynchronous Datasets
+HCatalog enables table and storage management for Pig, Hive and MapReduce. The format to specify a HCatalog table partition URI is
+hcat://[metastore server]:[port]/[database name]/[table name]/[partkey1]=[value];[partkey2]=[value];...
+
+For example,
+<verbatim>
+  <dataset name="logs" frequency="${coord:days(1)}"
+           initial-instance="2009-02-15T08:15Z" timezone="America/Los_Angeles">
+    <uri-template>
+      hcat://myhcatmetastore:9080/database1/table1/myfirstpartitionkey=myfirstvalue;mysecondpartitionkey=mysecondvalue
+    </uri-template>
+    <done-flag></done-flag>
+  </dataset>
+</verbatim>
+
+---+++ 5.3. Asynchronous Datasets
    * TBD
 
----+++ 5.3. Dataset Definitions
+---+++ 5.4. Dataset Definitions
 
 Dataset definitions are grouped in XML files.
 *IMPORTANT:* Please note that if an XML namespace version is specified for the coordinator-app element in the coordinator.xml file, no namespace needs to be defined separately for the datasets element (even if the dataset is defined in a separate file). Specifying it at multiple places might result in xml errors while submitting the coordinator job.
@@ -2226,11 +2253,375 @@ If coordinator job was started at 2011-0
 
 The =coord:user()= function returns the user that started the coordinator job.
 
----+++ 6.8. Parameterization of Coordinator Application
+---+++ 6.8. Using HCatalog data instances in Coordinator Applications (since Oozie 4.x)
+
+This section describes the EL functions that can be used to write coordinator applications with HCatalog data
+dependencies.
+
+---++++ 6.8.1. coord:databaseIn(String name), coord:databaseOut(String name) EL functions
+
+The functions =${coord:databaseIn(String name)}= and =${coord:databaseOut(String name)}= are used to pass the database
+name of HCat dataset instances, input and output respectively, that will be consumed by a workflow job triggered
+by a coordinator action.
+
+These functions take the name of the dataset as argument and return the 'database' name of that dataset as a string.
+If the dataset belongs to 'input-events', use =${coord:databaseIn(String name)}=; if it belongs to 'output-events',
+use =${coord:databaseOut(String name)}=.
+
+Refer to the [[CoordinatorFunctionalSpec#HCatPigExampleOne][Example]] below for usage.
+
+---++++ 6.8.2. coord:tableIn(String name), coord:tableOut(String name) EL functions
+
+The functions =${coord:tableIn(String name)}= and =${coord:tableOut(String name)}= are used to pass the table
+name of HCat dataset instances, input and output respectively, that will be consumed by a workflow job triggered
+by a coordinator action.
+
+These functions take the name of the dataset as argument and return the 'table' name of that dataset as a string.
+If the dataset belongs to 'input-events', use =${coord:tableIn(String name)}=; if it belongs to 'output-events',
+use =${coord:tableOut(String name)}=.
+
+Refer to the [[CoordinatorFunctionalSpec#HCatPigExampleOne][Example]] below for usage.
+
+---++++ 6.8.3. coord:dataInPartitionFilter(String name, String type) EL function
+
+The =${coord:dataInPartitionFilter(String name, String type)}= EL function resolves to a filter clause to filter
+all the partitions corresponding to the dataset instances specified in an input event dataset section. This EL function
+takes two arguments - the name of the input dataset, and the type of the workflow action which will be consuming this filter.
+There are 3 types - 'pig', 'hive' and 'java'. This filter clause from the EL function is to be passed as a parameter in the
+respective action in the workflow.
+
+The evaluated value of the filter clause will vary based on the action type passed to the EL function. In the case of
+pig, the filter will have "==" as the equality operator in the condition. In the case of hive and java, the filter
+will have "=" as the equality operator in the condition. The type java is for java actions, which use HCatInputFormat
+directly and launch jobs. The filter clause in that case can be used to construct the InputJobInfo in
+=HCatInputFormat.setInput(Job job, InputJobInfo inputJobInfo)=.
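+
+As an illustrative, non-normative sketch, a java action could consume the resolved filter along these lines. The
+argument wiring and the three-argument =InputJobInfo.create(database, table, filter)= factory are assumptions of
+this example, not part of the EL function contract:
+
+<verbatim>
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hcatalog.mapreduce.HCatInputFormat;
+import org.apache.hcatalog.mapreduce.InputJobInfo;
+
+public class FilterConsumer {
+    public static void main(String[] args) throws IOException {
+        // Hypothetical wiring from the workflow action configuration:
+        // args[0] = database, args[1] = table,
+        // args[2] = filter resolved by coord:dataInPartitionFilter(..., 'java')
+        String filter = args[2];
+
+        Job job = new Job(new Configuration(), "hcat-filter-example");
+        // The filter (with "=" as the equality operator for type 'java') selects
+        // exactly the partitions backing the input dataset instances.
+        HCatInputFormat.setInput(job, InputJobInfo.create(args[0], args[1], filter));
+        // ... configure mapper/output and submit the job as usual.
+    }
+}
+</verbatim>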
+
+Refer to the [[CoordinatorFunctionalSpec#HCatPigExampleOne][Example]] below for usage.
+
+---++++ 6.8.4. coord:dataOutPartitions(String name) EL function
+
+The =${coord:dataOutPartitions(String name)}= EL function resolves to a comma-separated list of partition key-value
+pairs for the output-event dataset. This can be passed as an argument to HCatStorer in Pig scripts. In the case of
+java actions that directly use HCatOutputFormat and launch jobs, the partitions list can be parsed to construct the
+partition values map for OutputJobInfo in =HCatOutputFormat.setOutput(Job job, OutputJobInfo outputJobInfo)=.
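+
+A non-normative sketch of how a java action might parse this list for HCatOutputFormat follows; the argument wiring
+is an assumption of this example:
+
+<verbatim>
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hcatalog.mapreduce.HCatOutputFormat;
+import org.apache.hcatalog.mapreduce.OutputJobInfo;
+
+public class OutPartitionsConsumer {
+    /** Parses "key1=value1,key2=value2" into a partition values map. */
+    static Map<String, String> parsePartitions(String spec) {
+        Map<String, String> partitions = new HashMap<String, String>();
+        for (String pair : spec.split(",")) {
+            String[] kv = pair.split("=", 2);
+            partitions.put(kv[0].trim(), kv[1].trim());
+        }
+        return partitions;
+    }
+
+    public static void main(String[] args) throws IOException {
+        // Hypothetical wiring: args[0] = database, args[1] = table,
+        // args[2] = value resolved by coord:dataOutPartitions(name)
+        Job job = new Job(new Configuration(), "hcat-output-example");
+        HCatOutputFormat.setOutput(job,
+                OutputJobInfo.create(args[0], args[1], parsePartitions(args[2])));
+        // ... configure reducer/output schema and submit the job as usual.
+    }
+}
+</verbatim>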
+
+The example below illustrates a pig job triggered by a coordinator, using the EL functions for HCat database, table,
+input partitions filter and output partitions. The example takes as input previous day's hourly data to produce
+aggregated daily output.
+
+
+*%GREEN% Example: %ENDCOLOR%*
+
+#HCatPigExampleOne
+---++++ Coordinator application definition:
+
+<verbatim>
+   <coordinator-app name="app-coord" frequency="${coord:days(1)}"
+                    start="2009-01-01T24:00Z" end="2009-12-31T24:00Z" timezone="UTC"
+                    xmlns="uri:oozie:coordinator:0.3">
+      <datasets>
+        <dataset name="Click-data" frequency="${coord:hours(1)}"
+                 initial-instance="2009-01-01T01:00Z" timezone="UTC">
+          <uri-template>
+             hcat://foo:11002/myInputDatabase/myInputTable/datestamp=${YEAR}${MONTH}${DAY}${HOUR};region=USA
+          </uri-template>
+        </dataset>
+        <dataset name="Stats" frequency="${coord:days(1)}"
+                 initial-instance="2009-01-01T01:00Z" timezone="UTC">
+          <uri-template>
+             hcat://foo:11002/myOutputDatabase/myOutputTable/datestamp=${YEAR}${MONTH}${DAY}
+          </uri-template>
+        </dataset>
+      </datasets>
+      <input-events>
+        <data-in name="raw-logs" dataset="Click-data">
+          <start-instance>${coord:current(-23)}</start-instance>
+          <end-instance>${coord:current(0)}</end-instance>
+        </data-in>
+      </input-events>
+      <output-events>
+        <data-out name="processed-logs" dataset="Stats">
+          <instance>${coord:current(0)}</instance>
+        </data-out>
+      </output-events>
+      <action>
+        <workflow>
+          <app-path>hdfs://bar:8020/usr/joe/logsprocessor-wf</app-path>
+          <configuration>
+            <property>
+              <name>IN_DB</name>
+              <value>${coord:databaseIn('raw-logs')}</value>
+            </property>
+            <property>
+              <name>IN_TABLE</name>
+              <value>${coord:tableIn('raw-logs')}</value>
+            </property>
+            <property>
+              <name>FILTER</name>
+              <value>${coord:dataInPartitionFilter('raw-logs', 'pig')}</value>
+            </property>
+            <property>
+              <name>OUT_DB</name>
+              <value>${coord:databaseOut('processed-logs')}</value>
+            </property>
+            <property>
+              <name>OUT_TABLE</name>
+              <value>${coord:tableOut('processed-logs')}</value>
+            </property>
+            <property>
+              <name>OUT_PARTITIONS</name>
+              <value>${coord:dataOutPartitions('processed-logs')}</value>
+            </property>
+         </configuration>
+       </workflow>
+      </action>
+   </coordinator-app>
+</verbatim>
+
+
+Parameterizing the input/output databases and tables using the corresponding EL function as shown will make them
+available in the pig action of the workflow 'logsprocessor-wf'.
+
+Each coordinator action will use as input events the last 24 hourly instances of the 'Click-data' dataset.
+The =${coord:dataInPartitionFilter(String name, String type)}= function enables the coordinator application to pass the
+partition filter corresponding to all the dataset instances for the last 24 hours to the workflow job triggered
+by the coordinator action. The =${coord:dataOutPartitions(String name)}= function enables the coordinator application
+to pass the partition key-value string needed by the *HCatStorer* in the Pig job when the workflow is triggered by the
+coordinator action.
+
+#HCatWorkflow
+---++++ Workflow definition:
+
+<verbatim>
+<workflow-app xmlns="uri:oozie:workflow:0.3" name="logsprocessor-wf">
+    <credentials>
+      <credential name='hcatauth' type='hcat'>
+        <property>
+          <name>hcat.metastore.uri</name>
+          <value>${HCAT_URI}</value>
+        </property>
+        <property>
+          <name>hcat.metastore.principal</name>
+          <value>${HCAT_PRINCIPAL}</value>
+        </property>
+      </credential>
+    </credentials>
+    <start to="pig-node"/>
+    <action name="pig-node" cred="hcatauth">
+        <pig>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <prepare>
+                <delete path="hcat://foo:11002/${OUT_DB}/${OUT_TABLE}/datestamp=${OUT_PARTITION_VAL_DATE}"/>
+            </prepare>
+            ...
+            <script>id.pig</script>
+            <param>HCAT_IN_DB=${IN_DB}</param>
+            <param>HCAT_IN_TABLE=${IN_TABLE}</param>
+            <param>HCAT_OUT_DB=${OUT_DB}</param>
+            <param>HCAT_OUT_TABLE=${OUT_TABLE}</param>
+            <param>PARTITION_FILTER=${FILTER}</param>
+            <param>OUTPUT_PARTITIONS=${OUT_PARTITIONS}</param>
+            <file>lib/hive-site.xml</file>
+        </pig>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>Pig failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
+    </kill>
+    <end name="end"/>
+</workflow-app>
+</verbatim>
+
+Ensure that the following jars are in the classpath, with versions corresponding to the HCatalog installation:
+hcatalog-core.jar, webhcat-java-client.jar, hive-common.jar, hive-exec.jar, hive-metastore.jar, hive-serde.jar,
+libfb303.jar and pig.jar. You can also specify the jars using the =archive= tag. The hive-site.xml needs to be
+provided using the =file= tag.
+
+*Example usage in Pig:*
+
+<verbatim>
+A = load '$HCAT_IN_DB.$HCAT_IN_TABLE' using org.apache.hcatalog.pig.HCatLoader();
+B = FILTER A BY $PARTITION_FILTER;
+C = foreach B generate foo, bar;
+store C into '$HCAT_OUT_DB.$HCAT_OUT_TABLE' using org.apache.hcatalog.pig.HCatStorer('$OUTPUT_PARTITIONS');
+</verbatim>
+
+For the =2009-01-02T00:00Z= run with the given dataset instances, the above Pig script with resolved values would look
+like:
+
+<verbatim>
+A = load 'myInputDatabase.myInputTable' using org.apache.hcatalog.pig.HCatLoader();
+B = FILTER A BY ((datestamp==2009010101 AND region==USA) OR
+    (datestamp==2009010102 AND region==USA) OR
+    ...
+    (datestamp==2009010123 AND region==USA) OR
+    (datestamp==2009010200 AND region==USA));
+C = foreach B generate foo, bar;
+store C into 'myOutputDatabase.myOutputTable' using org.apache.hcatalog.pig.HCatStorer('datestamp=20090102');
+</verbatim>
+
+---++++ 6.8.5. coord:dataInPartitionMin(String name, String partition) EL function
+
+The =${coord:dataInPartitionMin(String name, String partition)}= EL function resolves to the *minimum* value of the
+specified partition for all the dataset instances specified in an input event dataset section. It can be used to do
+range-based filtering of partitions in pig scripts together with
+the [[CoordinatorFunctionalSpec#DataInPartitionMax][dataInPartitionMax]] EL function.
+
+Refer to the [[CoordinatorFunctionalSpec#HCatPigExampleTwo][Example]] below for usage.
+
+#DataInPartitionMax
+---++++ 6.8.6. coord:dataInPartitionMax(String name, String partition) EL function
+
+The =${coord:dataInPartitionMax(String name, String partition)}= EL function resolves to the *maximum* value of the
+specified partition for all the dataset instances specified in an input event dataset section. It is a better practice
+to use =dataInPartitionMin= and =dataInPartitionMax= to form a range filter wherever possible instead
+of =dataInPartitionFilter= as it will be more efficient for filtering.
+
+Refer to the [[CoordinatorFunctionalSpec#HCatPigExampleTwo][Example]] below for usage.
+
+---++++ 6.8.7. coord:dataOutPartitionValue(String name, String partition) EL function
+
+The =${coord:dataOutPartitionValue(String name, String partition)}= EL function resolves to the value of the specified
+partition for the output-event dataset that will be consumed by a workflow job, e.g. a Pig job triggered by a
+coordinator action. This is a convenience function for passing a single partition-key's value if required, in
+addition to =dataOutPartitions()=; either one can be used.
+
+The example below illustrates a pig job triggered by a coordinator, using the aforementioned EL functions for input
+partition max/min values, output partition value, and database and table.
+
+*%GREEN% Example: %ENDCOLOR%*
+
+#HCatPigExampleTwo
+---++++ Coordinator application definition:
+
+<verbatim>
+   <coordinator-app name="app-coord" frequency="${coord:days(1)}"
+                    start="2009-01-01T24:00Z" end="2009-12-31T24:00Z" timezone="UTC"
+                    xmlns="uri:oozie:coordinator:0.1">
+      <datasets>
+        <dataset name="Click-data" frequency="${coord:hours(1)}"
+                 initial-instance="2009-01-01T01:00Z" timezone="UTC">
+          <uri-template>
+             hcat://foo:11002/myInputDatabase/myInputTable/datestamp=${YEAR}${MONTH}${DAY}${HOUR};region=USA
+          </uri-template>
+        </dataset>
+        <dataset name="Stats" frequency="${coord:days(1)}"
+                 initial-instance="2009-01-01T01:00Z" timezone="UTC">
+          <uri-template>
+             hcat://foo:11002/myOutputDatabase/myOutputTable/datestamp=${YEAR}${MONTH}${DAY};region=USA
+          </uri-template>
+        </dataset>
+      </datasets>
+      <input-events>
+        <data-in name="raw-logs" dataset="Click-data">
+          <start-instance>${coord:current(-23)}</start-instance>
+          <end-instance>${coord:current(0)}</end-instance>
+        </data-in>
+      </input-events>
+      <output-events>
+        <data-out name="processed-logs" dataset="Stats">
+          <instance>${coord:current(0)}</instance>
+        </data-out>
+      </output-events>
+      <action>
+        <workflow>
+          <app-path>hdfs://bar:8020/usr/joe/logsprocessor-wf</app-path>
+          <configuration>
+            <property>
+              <name>IN_DB</name>
+              <value>${coord:databaseIn('raw-logs')}</value>
+            </property>
+            <property>
+              <name>IN_TABLE</name>
+              <value>${coord:tableIn('raw-logs')}</value>
+            </property>
+            <property>
+              <name>DATE_MIN</name>
+              <value>${coord:dataInPartitionMin('raw-logs','datestamp')}</value>
+            </property>
+            <property>
+              <name>DATE_MAX</name>
+              <value>${coord:dataInPartitionMax('raw-logs','datestamp')}</value>
+            </property>
+            <property>
+              <name>OUT_DB</name>
+              <value>${coord:databaseOut('processed-logs')}</value>
+            </property>
+            <property>
+              <name>OUT_TABLE</name>
+              <value>${coord:tableOut('processed-logs')}</value>
+            </property>
+            <property>
+              <name>OUT_PARTITION_VAL_REGION</name>
+              <value>${coord:dataOutPartitionValue('processed-logs','region')}</value>
+            </property>
+            <property>
+              <name>OUT_PARTITION_VAL_DATE</name>
+              <value>${coord:dataOutPartitionValue('processed-logs','datestamp')}</value>
+            </property>
+         </configuration>
+       </workflow>
+      </action>
+   </coordinator-app>
+</verbatim>
+
+In this example, each coordinator action will use as input events the last 24 hourly instances of the 'Click-data' dataset.
+
+For the =2009-01-02T00:00Z= run, the =${coord:dataInPartitionMin('raw-logs','datestamp')}= function will resolve to
+the minimum of the 24 dataset instances for partition 'datestamp'
+i.e. among 2009010101, 2009010102, ...., 2009010123, 2009010200, the minimum would be "2009010101".
+
+Similarly, the =${coord:dataInPartitionMax('raw-logs','datestamp')}= function will resolve to the maximum of the 24
+dataset instances for partition 'datestamp'
+i.e. among 2009010101, 2009010102, ...., 2009010123, 2009010200, the maximum would be "2009010200".
+
+Finally, the =${coord:dataOutPartitionValue(String name, String partition)}= function enables the coordinator
+application to pass a specified partition's value string needed by the HCatStorer in the Pig job.
+The =${coord:dataOutPartitionValue('processed-logs','region')}= function will resolve to: "USA"
+and the =${coord:dataOutPartitionValue('processed-logs','datestamp')}= function will resolve to: "20090102".
+
+For the workflow definition with the <pig> action, refer to the [[CoordinatorFunctionalSpec#HCatWorkflow][previous example]],
+with the following change in pig params, in addition to database and table.
+
+<verbatim>
+...
+<param>PARTITION_DATE_MIN=${DATE_MIN}</param>
+<param>PARTITION_DATE_MAX=${DATE_MAX}</param>
+<param>REGION=${region}</param>
+<param>OUT_PARTITION_VAL_REGION=${OUT_PARTITION_VAL_REGION}</param>
+<param>OUT_PARTITION_VAL_DATE=${OUT_PARTITION_VAL_DATE}</param>
+...
+</verbatim>
+
+*Example usage in Pig:*
+This illustrates another pig script which filters partitions based on a range, with the range limits parameterized
+with the EL functions:
+
+<verbatim>
+A = load '$HCAT_IN_DB.$HCAT_IN_TABLE' using org.apache.hcatalog.pig.HCatLoader();
+B = FILTER A BY datestamp >= '$PARTITION_DATE_MIN' AND datestamp < '$PARTITION_DATE_MAX' AND region=='$REGION';
+C = foreach B generate foo, bar;
+store C into '$HCAT_OUT_DB.$HCAT_OUT_TABLE' using org.apache.hcatalog.pig.HCatStorer('region=$OUT_PARTITION_VAL_REGION,datestamp=$OUT_PARTITION_VAL_DATE');
+</verbatim>
+
+For example,
+for the =2009-01-02T00:00Z= run with the given dataset instances, the above Pig script with resolved values would look like:
+
+<verbatim>
+A = load 'myInputDatabase.myInputTable' using org.apache.hcatalog.pig.HCatLoader();
+B = FILTER A BY datestamp >= '2009010101' AND datestamp < '2009010200' AND region=='USA';
+C = foreach B generate foo, bar;
+store C into 'myOutputDatabase.myOutputTable' using org.apache.hcatalog.pig.HCatStorer('region=USA,datestamp=20090102');
+</verbatim>
+
+
+---+++ 6.9. Parameterization of Coordinator Application
 
 This section describes the EL functions that can be used to parameterize both datasets and coordinator application actions.
 
----++++ 6.8.1. coord:dateOffset(String baseDate, int instance, String timeUnit) EL Function
+---++++ 6.9.1. coord:dateOffset(String baseDate, int instance, String timeUnit) EL Function
 
 The =${coord:dateOffset(String baseDate, int instance, String timeUnit)}= EL function calculates a date based on the following equation: =newDate = baseDate + instance * timeUnit=
 
@@ -2264,7 +2655,7 @@ For example, if baseDate is '2009-01-01T
 
 In this example, the 'nextInstance' will be '2009-01-02T24:00Z' for the first action, and the value of 'previousInstance' will be '2008-12-31T24:00Z' for the same instance.
 
----++++ 6.8.2. coord:formatTime(String ts, String format) EL Function (since Oozie 2.3.2)
+---++++ 6.9.2. coord:formatTime(String ts, String format) EL Function (since Oozie 2.3.2)
 
 The =${coord:formatTime(String timeStamp, String format)}= function allows transformation of the standard ISO8601 timestamp strings into other desired formats.
 

Modified: oozie/trunk/docs/src/site/twiki/DG_QuickStart.twiki
URL: http://svn.apache.org/viewvc/oozie/trunk/docs/src/site/twiki/DG_QuickStart.twiki?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/docs/src/site/twiki/DG_QuickStart.twiki (original)
+++ oozie/trunk/docs/src/site/twiki/DG_QuickStart.twiki Mon Feb 25 21:42:07 2013
@@ -63,7 +63,7 @@ The ExtJS library is optional (only requ
 and =oozie-stop.sh=) run only under the Unix user that owns the Oozie installation directory;
 if necessary, use =sudo -u OOZIE_USER= when invoking the scripts.
 
-As of Oozie 3.3.2, use of =oozie-start.sh=, =oozie-run.sh=, and =oozie-stop.sh= has
+As of Oozie 3.4.0, use of =oozie-start.sh=, =oozie-run.sh=, and =oozie-stop.sh= has
 been deprecated and will print a warning. The =oozied.sh= script should be used
 instead; passing it =start=, =run=, or =stop= as an argument will perform the
 behaviors of =oozie-start.sh=, =oozie-run.sh=, and =oozie-stop.sh= respectively.

Modified: oozie/trunk/docs/src/site/twiki/ENG_Custom_Authentication.twiki
URL: http://svn.apache.org/viewvc/oozie/trunk/docs/src/site/twiki/ENG_Custom_Authentication.twiki?rev=1449911&r1=1449910&r2=1449911&view=diff
==============================================================================
--- oozie/trunk/docs/src/site/twiki/ENG_Custom_Authentication.twiki (original)
+++ oozie/trunk/docs/src/site/twiki/ENG_Custom_Authentication.twiki Mon Feb 25 21:42:07 2013
@@ -238,27 +238,10 @@ included with a GET request will go, and
 The above value, which is the default, is a basic html page that has fields for the username and password and meets the previously
 stated requirements.
 
-The =oozie.web.login.auth= cookie will expire 3 minutes after being given to the user.  Once the user has been redirected back to
-the Oozie web console and given the AuthenticationToken, the =oozie.web.login.auth= cookie is no longer used.  If the
-AuthenticationToken expires but the user still has a valid =oozie.web.login.auth= cookie, the ExampleAltAuthenticationHandler will
-simply give out a new AuthenticationToken; the desired behavior is that the user is bounced back to the oozie-login.war server to
-re-authenticate, hence the very short lifetime of the =oozie.web.login.auth= cookie.  However, the expiration time of the cookie
-is configurable by changing the following parameter in the web.xml in the oozie-login.war file (or in the
-login/src/main/webapp/WEB-INF/ directory before building it).  It is given in seconds.  A positive value indicates that the cookie
-will expire after that many seconds have passed; make sure this value is high enough to allow the user to be forwarded to the
-backurl before the cookie expires.  A negative value indicates that the cookie will be deleted when the browser exits.
-<verbatim>
-    <init-param>
-        <param-name>login.auth.cookie.expire.time</param-name>
-        <param-value>180</param-value>
-    </init-param>
-</verbatim>
-The above value, which is the default, is the number of seconds in 3 minutes.
-
 ---+++ LDAPLoginServlet
 
 This is a second web servlet that gets bundled in the oozie-login.war web application.  It inherits from the LoginServlet, so the
-previous configuration information (e.g. login.page.template) still applies to this servlet.  The only difference between the
+previous configuration information (i.e. login.page.template) still applies to this servlet.  The only difference between the
 LDAPLoginServlet and the LoginServlet, is that the LDAPLoginServlet is configured against an LDAP server to provide the
 authentication instead of simply checking that the username and password are equal.  As before, this is not secure and should not be
 used in production; it is only provided as an example.
@@ -269,9 +252,7 @@ The oozie-login.war web application is c
 have to change the following line in the web.xml from:
 <verbatim>
     <servlet-class>org.apache.oozie.servlet.login.LoginServlet</servlet-class>
-</verbatim>
 to:
-<verbatim>
     <servlet-class>org.apache.oozie.servlet.login.LDAPLoginServlet</servlet-class>
 </verbatim>