Posted to commits@cxf.apache.org by se...@apache.org on 2016/12/22 14:23:24 UTC

[2/8] cxf git commit: [CXF-7192] Removing cxf-rt-management-web module, to be restored on demand

http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPullServer.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPullServer.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPullServer.java
deleted file mode 100644
index fd0b4d7..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPullServer.java
+++ /dev/null
@@ -1,622 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.WeakHashMap;
-import java.util.logging.Handler;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.HeaderParam;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.StreamingOutput;
-import javax.ws.rs.core.UriBuilder;
-
-import org.apache.abdera.model.Entry;
-import org.apache.abdera.model.Feed;
-import org.apache.cxf.Bus;
-import org.apache.cxf.jaxrs.ext.MessageContext;
-import org.apache.cxf.jaxrs.ext.StreamingResponse;
-import org.apache.cxf.jaxrs.ext.search.ConditionType;
-import org.apache.cxf.jaxrs.ext.search.OrSearchCondition;
-import org.apache.cxf.jaxrs.ext.search.PrimitiveStatement;
-import org.apache.cxf.jaxrs.ext.search.SearchCondition;
-import org.apache.cxf.jaxrs.ext.search.SearchConditionVisitor;
-import org.apache.cxf.jaxrs.ext.search.SearchContext;
-import org.apache.cxf.jaxrs.utils.ExceptionUtils;
-import org.apache.cxf.management.web.logging.LogLevel;
-import org.apache.cxf.management.web.logging.LogRecord;
-import org.apache.cxf.management.web.logging.ReadWriteLogStorage;
-import org.apache.cxf.management.web.logging.ReadableLogStorage;
-import org.apache.cxf.management.web.logging.atom.converter.StandardConverter;
-
-
-@Path("/logs")
-public class AtomPullServer extends AbstractAtomBean {
-
-    private List<LogRecord> records = new LinkedList<LogRecord>();
-    private WeakHashMap<Integer, Feed> feeds = new WeakHashMap<Integer, Feed>();
-    private ReadableLogStorage storage;
-    private int pageSize = 20;
-    private int maxInMemorySize = 1000;
-    private volatile int recordsSize;
-    private volatile boolean alreadyClosed;
-    private SearchCondition<LogRecord> readableStorageCondition;
-    private Map<String, Object> activeStreams;
-    
-    @Context
-    private MessageContext context;
-    
-    private List<String> endpointAddresses;
-    private String serverAddress;
-    
-    public void setEndpointAddress(String address) {
-        setEndpointAddresses(Collections.singletonList(address));
-    }
-    
-    public void setEndpointAddresses(List<String> addresses) {
-        this.endpointAddresses = addresses;
-    }
-    
-    public void setServerAddress(String address) {
-        this.serverAddress = address;
-    }
-    
-    @Override
-    public void init() {
-        // the storage might have been used to save previous records, or it might
-        // point to a file that log entries are added to
-        if (storage != null) {
-            //-1 can be returned by a read-only storage if it does not know in advance
-            // the number of records it may contain 
-            recordsSize = storage.getSize();
-        }
-        
-        if (storage == null || storage instanceof ReadWriteLogStorage) {
-            super.init();
-        } else {
-            // super.init() results in an additional Handler being created and its publish()
-            // method being called as a result. If the storage is read-only it is assumed to point to
-            // an external source of log records, thus there is no need to receive publish events here.
-            
-            // Instead we create a SearchCondition which the external storage will check against when
-            // loading the matching records on request
-            
-            List<SearchCondition<LogRecord>> list = new LinkedList<SearchCondition<LogRecord>>();
-            for (LoggerLevel l : super.getLoggers()) {
-                LogRecord r = new LogRecord();
-                r.setLoggerName(l.getLogger());
-                r.setLevel(LogLevel.valueOf(l.getLevel()));
-                list.add(new SearchConditionImpl(r));
-            }
-            readableStorageCondition = list.size() == 0 ? null : new OrSearchCondition<LogRecord>(list);
-        }
-        activeStreams = Collections.synchronizedMap(new HashMap<String, Object>());
-        initBusProperty();
-    }
-    
-
-    @Override
-    protected Handler createHandler() {
-        return new AtomPullHandler(this);
-    }
-    
-    @SuppressWarnings("unchecked")
-    protected void initBusProperty() {
-        if (endpointAddresses != null && serverAddress != null && getBus() != null) {
-            Bus bus = getBus();
-            synchronized (bus) {
-                Map<String, String> addresses = 
-                    (Map<String, String>)bus.getProperty("org.apache.cxf.extensions.logging.atom.pull");
-                if (addresses == null) {
-                    addresses = new HashMap<String, String>();
-                }
-                for (String address : endpointAddresses) {
-                    addresses.put(address, serverAddress + "/logs");
-                }
-                bus.setProperty("org.apache.cxf.extensions.logging.atom.pull", addresses);
-            }
-        }
-    }
-    
-    @GET
-    @Produces("application/atom+xml")
-    public Feed getXmlFeed() {
-        return getXmlFeedWithPage(1);
-    }
-    
-    @GET
-    @Produces("application/atom+xml")
-    @Path("{id}")
-    public Feed getXmlFeedWithPage(@PathParam("id") int page) {
-        
-        // let's check if the Atom reader is asking for a set of records which has already been 
-        // converted to a Feed
-        
-        synchronized (feeds) {
-            Feed f = feeds.get(page);
-            if (f != null) {
-                return f;
-            }
-        }
-        
-        Feed feed = null;
-        SearchCondition<LogRecord> condition = getCurrentCondition();
-        synchronized (records) {
-            List<LogRecord> list = new LinkedList<LogRecord>();
-            int lastPage = fillSubList(list, page, condition);
-            Collections.sort(list, new LogRecordComparator());
-            feed = (Feed)new CustomFeedConverter(page).convert(list).get(0);
-            setFeedPageProperties(feed, page, lastPage);
-        }
-        // if at this point we have converted only n < pageSize records and
-        // persist a Feed keyed by the page, then another reader requesting the same page 
-        // may miss the latest records which might have been added since the original request
-        if (condition == null && feed.getEntries().size() == pageSize) {
-            synchronized (feeds) {
-                feeds.put(page, feed);
-            }
-        }
-        return feed;
-    }
-    
-    @GET
-    @Produces({"text/html", "application/xhtml+xml" })
-    @Path("alternate/{id}")
-    public String getAlternateFeed(@PathParam("id") int page) {
-        List<LogRecord> list = new LinkedList<LogRecord>();
-        fillSubList(list, page, getCurrentCondition());
-        Collections.sort(list, new LogRecordComparator());
-        return convertEntriesToHtml(list);
-        
-    }
-
-    @GET
-    @Produces({"text/html", "application/xhtml+xml" })
-    @Path("subscribe/alternate")
-    public StreamingOutput getAlternateContinuousFeed(@HeaderParam("requestId") String reqid) {
-        final String key = reqid == null ? "*" : reqid;
-        return new StreamingOutput() {
-            public void write(final OutputStream out) throws IOException, WebApplicationException {
-                // return the last entry
-                out.write(convertEntryToHtmlFragment(((LinkedList<LogRecord>)records).getLast()).getBytes());
-                
-                activeStreams.put(key, out);
-            }
-        };
-    }
-    
-
-    @GET
-    @Produces("application/atom+xml;type=entry")
-    @Path("subscribe")
-    public StreamingResponse<Entry> getXmlContinuousFeed(@HeaderParam("requestId") String reqid) {
-        final String key = reqid == null ? "*" : reqid;
-        return new StreamingResponse<Entry>() {
-            public void writeTo(final StreamingResponse.Writer<Entry> out) throws IOException {
-                // return the last entry
-                Entry entry = (Entry) new StandardConverter(StandardConverter.Output.ENTRY, 
-                                                            StandardConverter.Multiplicity.ONE, 
-                                                            StandardConverter.Format.CONTENT)
-                    .convert(Collections.singletonList(((LinkedList<LogRecord>)records).getLast())).get(0);
-                out.write(entry);
-
-                activeStreams.put(key, out);
-            }
-        };
-    }
-
-    @GET
-    @Produces("text/plain")
-    @Path("unsubscribe/{key}")
-    public Boolean unsubscribeContinuousFeed(@PathParam("key") String key) {
-        return activeStreams.remove(key) != null;
-    }
-
-    @GET
-    @Path("entry/{id}")
-    @Produces("application/atom+xml;type=entry")
-    public Entry getEntry(@PathParam("id") int index) {
-        List<LogRecord> list = getLogRecords(index, getCurrentCondition());
-        return (Entry)new CustomEntryConverter(index).convert(list).get(0);
-    }
-    
-    @GET
-    @Path("entry/alternate/{id}")
-    @Produces({"text/html", "application/xhtml+xml" })
-    public String getAlternateEntry(@PathParam("id") int index) {
-        List<LogRecord> logRecords = getLogRecords(index, getCurrentCondition());
-        return convertEntryToHtml(logRecords.get(0));
-    }
-    
-    @GET
-    @Path("records")
-    @Produces("text/plain")
-    public int getNumberOfAvailableRecords() {
-        return recordsSize;
-    }
-    
-    private List<LogRecord> getLogRecords(int index, SearchCondition<LogRecord> theSearch) {
-        List<LogRecord> list = new LinkedList<LogRecord>();
-        if (storage != null) {
-            int storageSize = storage.getSize();
-            if (recordsSize == -1 || index < storageSize) {
-                storage.load(list, theSearch, index, 1);
-            } else if (index < recordsSize) {
-                list.add(records.get(index - storageSize));   
-            }
-        } else {
-            list.add(records.get(index));
-        }
-        if (list.size() != 1) { 
-            throw ExceptionUtils.toNotFoundException(null, null);
-        }
-        return list;
-    }
-    
-    
-    protected int fillSubList(List<LogRecord> list, int page, SearchCondition<LogRecord> theSearch) {
-        int oldListSize = list.size();
-        
-        if (storage != null) {
-            page = storage.load(list, theSearch, page, pageSize);
-        }
-        
-        if (recordsSize == -1 || recordsSize == 0 || list.size() == pageSize) {
-            return page;
-        }
-        
-        int fromIndex = page == 1 ? list.size() 
-                                  : (page - 1) * pageSize + list.size();
-        if (fromIndex > recordsSize) {
-            // this should not happen really
-            page = 1;
-            fromIndex = 0;
-        }
-        int toIndex = page * pageSize;
-        if (toIndex > recordsSize) {
-            toIndex = recordsSize;
-        }
-        int offset = storage != null ? pageSize - (list.size() - oldListSize) : 0;
-        fromIndex -= offset;
-        toIndex -= offset;
-        list.addAll(filterRecords(records.subList(fromIndex, toIndex), theSearch));
-        
-        
-        if (theSearch != null && list.size() < pageSize && page * pageSize < recordsSize) {
-            return fillSubList(list, page + 1, theSearch);    
-        } else {
-            return page;
-        }
-    }
-    
-    private List<LogRecord> filterRecords(List<LogRecord> list, SearchCondition<LogRecord> theSearch) {
-        return theSearch == null ? list : theSearch.findAll(list);
-    }
-    
-    private SearchCondition<LogRecord> getCurrentCondition() {
-        SearchCondition<LogRecord> current = context.getContext(SearchContext.class)
-            .getCondition(LogRecord.class);
-        if (current == null) {
-            return readableStorageCondition;
-        } else {
-            return current;
-        }
-    }
-    
-    private String getSearchExpression() {
-        return context.getContext(SearchContext.class).getSearchExpression();
-    }
-    
-    protected void setFeedPageProperties(Feed feed, int page, int lastPage) {
-        String self = context.getUriInfo().getAbsolutePath().toString();
-        feed.addLink(self, "self");
-        
-        int feedSize = feed.getEntries().size();
-        String searchExpression = getSearchExpression();
-        
-        String uri = context.getUriInfo().getBaseUriBuilder().path("logs").build().toString();
-        feed.addLink(uri + "/alternate/" + page, "alternate");
-        if (recordsSize != -1) {
-            if (page > 2) {
-                feed.addLink(createLinkUri(uri, searchExpression), "first");
-            }
-            
-            if (searchExpression == null && lastPage * pageSize < recordsSize
-                || searchExpression != null && feedSize == pageSize) {
-                feed.addLink(createLinkUri(uri + "/" + (lastPage + 1), searchExpression), "next");
-            }
-            
-            if (searchExpression == null && page * (pageSize + 1) < recordsSize) {
-                feed.addLink(uri + "/" + (recordsSize / pageSize + 1), "last");
-            }
-        } else if (feedSize == pageSize) {
-            feed.addLink(createLinkUri(uri + "/" + (lastPage + 1), searchExpression), "next");
-        }
-        if (page > 1) {
-            uri = page > 2 ? uri + "/" + (page - 1) : uri;
-            feed.addLink(createLinkUri(uri, searchExpression), "previous");
-        }
-    }
-    
-    private String createLinkUri(String uri, String search) {
-        return search == null ? uri : uri + "?_s=" + search; 
-    }
-    
-    public void publish(LogRecord record) {
-        if (alreadyClosed) {
-            System.err.println("AtomPullServer has been closed, the following log record can not be saved : "
-                               + record.toString());
-            return;
-        }
-        synchronized (records) {
-            if (records.size() == maxInMemorySize) {
-                if (storage instanceof ReadWriteLogStorage) {
-                    ((ReadWriteLogStorage)storage).save(records);
-                    records.clear();
-                } else {
-                    LogRecord oldRecord = records.remove(0);
-                    System.err.println("The oldest log record is removed : " + oldRecord.toString());
-                }
-            } 
-            records.add(record);
-            ++recordsSize;
-        }
-        submit(record);
-    }
-    
-    private void submit(final LogRecord record) {
-        //TODO use an executor to broadcast the record asynchronously
-    //TODO take the search condition into consideration to filter out non-matching entries
-        if (activeStreams.size() > 0) {
-            byte[] rbytes = null;
-            Entry rentry = null;
-            for (Iterator<Object> it = activeStreams.values().iterator(); it.hasNext();) {
-                Object out = it.next();
-                try {
-                    if (out instanceof OutputStream) {
-                        if (rbytes == null) {
-                            rbytes = convertEntryToHtmlFragment(record).getBytes();
-                        }
-                        ((OutputStream)out).write(rbytes);
-                    } else if (out instanceof StreamingResponse.Writer) {
-                        ClassLoader cl = Thread.currentThread().getContextClassLoader();
-                        try {
-                            // switch TCCL as abdera uses TCCL to load various abdera classes during serialization 
-                            Thread.currentThread().setContextClassLoader(StandardConverter.class.getClassLoader());
-                            if (rentry == null) {
-                                rentry = (Entry) new StandardConverter(StandardConverter.Output.ENTRY, 
-                                                                       StandardConverter.Multiplicity.ONE, 
-                                                                       StandardConverter.Format.CONTENT)
-                                    .convert(Collections.singletonList(record)).get(0);
-                            }
-                            ((StreamingResponse.Writer<Entry>)out).write(rentry);
-                        } finally {
-                            Thread.currentThread().setContextClassLoader(cl);
-                        }
-                    }
-                } catch (Throwable t) {
-                    // REVISIT
-                    // the reason for not logging anything here is to not further clog the logger 
-                    // with this log broadcasting failure.
-                    System.err.print("ERROR | AtomPullServer | " + t + "; Unregistering " + out);
-                    it.remove();
-                }
-            }
-        }
-    }
-
-    public void setPageSize(int size) {
-        pageSize = size;
-    }
-
-    public void setMaxInMemorySize(int maxInMemorySize) {
-        this.maxInMemorySize = maxInMemorySize;
-    }
-
-    public void setStorage(ReadableLogStorage storage) {
-        this.storage = storage;
-    }
-    
-    public void close() {
-        if (alreadyClosed) {
-            return;
-        }
-        alreadyClosed = true;
-        if (storage instanceof ReadWriteLogStorage) {
-            ((ReadWriteLogStorage)storage).save(records);
-        }
-    }
-    
-    public synchronized void reset() {
-        records.clear();
-        recordsSize = 0;
-        feeds.clear();
-        activeStreams.clear();
-    }
-    
-    // TODO : this all can be done later on in a simple xslt template
-    private String convertEntriesToHtml(List<LogRecord> rs) {
-        StringBuilder sb = new StringBuilder();
-        startHtmlHeadAndBody(sb, "CXF Service Log Entries");
-        addRecordToTable(sb, rs, true);
-        sb.append("</body></html>");
-        return sb.toString();
-    }
-    // TODO : this all can be done later on in a simple xslt template
-    private String convertEntryToHtml(LogRecord r) {
-        StringBuilder sb = new StringBuilder();
-        startHtmlHeadAndBody(sb, r.getLevel().toString());
-        addRecordToTable(sb, Collections.singletonList(r), false);
-        sb.append("</body></html>");
-        return sb.toString();
-    }
-    private String convertEntryToHtmlFragment(LogRecord r) {
-        StringBuilder sb = new StringBuilder();
-        DateFormat df = new SimpleDateFormat("dd/MM/yy HH:mm:ss");
-        addRecordToTable(sb, df, r, false);
-        return sb.toString();
-    }
-    
-    private void addRecordToTable(StringBuilder sb, List<LogRecord> list, boolean forFeed) {
-        DateFormat df = new SimpleDateFormat("dd/MM/yy HH:mm:ss");
-        sb.append("<table border=\"1\">");
-        sb.append("<tr><th>Date</th><th>Level</th><th>Logger</th><th>Message</th></tr>");
-        for (LogRecord lr : list) {
-            addRecordToTable(sb, df, lr, forFeed);
-        }
-        sb.append("</table><br/><br/>");
-    }
-    
-    private void addRecordToTable(StringBuilder sb, DateFormat df, LogRecord lr, boolean forFeed) {
-        sb.append("<tr>");
-        sb.append("<td>" + df.format(lr.getDate()) + "</td>");
-        sb.append("<td>" + lr.getLevel().toString() + "</td>");
-        sb.append("<td>" + lr.getLoggerName() + "</td>");
-        String message = null;
-        if (lr.getMessage().length() > 0) {
-            message =  lr.getThrowable().length() > 0 ? lr.getMessage() + " : " + lr.getThrowable()
-                       : lr.getMessage();
-        } else if (lr.getThrowable().length() > 0) {
-            message = lr.getThrowable();
-        } else {
-            message = "&nbsp;";
-        }
-        if (forFeed && lr.getThrowable().length() > 0) {
-            message = message.substring(0, message.length() / 2);
-        }
-        sb.append("<td>" + message + "</td>");
-        sb.append("</tr>");
-    }
-
-    private void startHtmlHeadAndBody(StringBuilder sb, String title) {
-        sb.append("<html xmlns=\"http://www.w3.org/1999/xhtml\">");
-        sb.append("<head>");
-        sb.append("<meta http-equiv=\"content-type\" content=\"text/html;charset=UTF-8\"/>");
-        sb.append("<title>" + "Log record with level " + title + "</title>");
-        sb.append("</head>");
-        sb.append("<body>");
-    }
-    
-    private static class SearchConditionImpl implements SearchCondition<LogRecord> {
-        private LogRecord template;
-        
-        SearchConditionImpl(LogRecord l) {
-            this.template = l;
-        }
-
-        public boolean isMet(LogRecord pojo) {
-            
-            return (template.getLevel() == LogLevel.ALL
-                   || pojo.getLevel().compareTo(template.getLevel()) <= 0)
-                   && template.getLoggerName().equals(pojo.getLoggerName());
-        }
-
-        public LogRecord getCondition() {
-            return new LogRecord(template);
-        }
-
-        public ConditionType getConditionType() {
-            return ConditionType.CUSTOM;
-        }
-
-        public List<SearchCondition<LogRecord>> getSearchConditions() {
-            return null;
-        }
-
-        public List<LogRecord> findAll(Collection<LogRecord> pojos) {
-            List<LogRecord> list = new LinkedList<LogRecord>();
-            for (LogRecord r : pojos) {
-                if (isMet(r)) {
-                    list.add(r);
-                }
-            }
-            return list;
-        }
-
-        public PrimitiveStatement getStatement() {
-            return null;
-        }
-
-        public void accept(SearchConditionVisitor<LogRecord, ?> visitor) {
-        }
-    }
-    
-    private static class LogRecordComparator implements Comparator<LogRecord> {
-
-        public int compare(LogRecord r1, LogRecord r2) {
-            return r1.getDate().compareTo(r2.getDate()) * -1;
-        }
-        
-    }
-    
-    private class CustomFeedConverter extends StandardConverter {
-        private int page;
-        CustomFeedConverter(int page) {
-            super(Output.FEED, Multiplicity.MANY, Format.CONTENT);
-            this.page = page;
-        }
-        
-        @Override
-        protected void setDefaultEntryProperties(Entry entry, List<LogRecord> rs, int entryIndex) {
-            super.setDefaultEntryProperties(entry, rs, entryIndex);
-            UriBuilder builder = context.getUriInfo().getAbsolutePathBuilder().path("entry");
-            Integer realIndex = page == 1 ? entryIndex : page * pageSize + entryIndex;
-
-            entry.addLink(builder.clone().path(realIndex.toString()).build().toString(), "self");
-            entry.addLink(builder.path("alternate").path(realIndex.toString()).build().toString(), 
-                          "alternate");
-        }
-        
-    }
-    
-    private class CustomEntryConverter extends StandardConverter {
-        private String selfFragment;
-        private String altFragment;
-        CustomEntryConverter(int index) {
-            super(Output.ENTRY, Multiplicity.ONE, Format.CONTENT);
-            this.selfFragment = "logs/entry/" + index;
-            this.altFragment = "logs/alternate/entry/" + index;
-        }
-        
-        @Override
-        protected void setDefaultEntryProperties(Entry entry, List<LogRecord> rs, int entryIndex) {
-            super.setDefaultEntryProperties(entry, rs, entryIndex);
-            entry.addLink(context.getUriInfo().getBaseUriBuilder().path(selfFragment).build().toString(),
-                "self");
-            entry.addLink(context.getUriInfo().getBaseUriBuilder().path(altFragment).build().toString(),
-                "alternate");
-        }
-    }
-}
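
The diff above removes the pull-style endpoint, so an application that still wants to browse its
logs as paged Atom feeds has to publish the bean itself. Below is a minimal sketch of such wiring,
assuming the standard CXF JAXRSServerFactoryBean API and using only the setters visible in the
deleted source; the addresses and sizes are illustrative, and an Atom (Abdera) message body
provider still needs to be registered (its class name varies between CXF versions, so it is
omitted here).

    import org.apache.cxf.jaxrs.JAXRSServerFactoryBean;
    import org.apache.cxf.management.web.logging.atom.AtomPullServer;

    public final class AtomPullServerSetup {
        public static void main(String[] args) {
            AtomPullServer logs = new AtomPullServer();
            logs.setServerAddress("http://localhost:9080");       // base address advertised via the Bus property
            logs.setEndpointAddress("http://localhost:8080/app"); // endpoint whose logs are exposed
            logs.setPageSize(20);
            logs.setMaxInMemorySize(1000);
            logs.init();                                          // attaches the JUL handler / sets up storage

            JAXRSServerFactoryBean sf = new JAXRSServerFactoryBean();
            sf.setAddress("http://localhost:9080");
            sf.setServiceBean(logs);                              // serves GET /logs, /logs/{page}, /logs/entry/{id}, ...
            sf.create();
        }
    }

With that in place a feed reader can page through http://localhost:9080/logs using the
"next"/"previous" links set in setFeedPageProperties() above.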

http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushBean.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushBean.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushBean.java
deleted file mode 100644
index 6e0c096..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushBean.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom;
-
-import java.util.logging.Handler;
-
-import org.apache.commons.lang3.Validate;
-import org.apache.cxf.management.web.logging.atom.converter.Converter;
-import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;
-
-/**
- * Bean used to configure the {@link AtomPushHandler JUL handler} with Spring instead of a properties file. See
- * the {@link AtomPushHandler} class for a detailed description of the parameters. In addition to configuring the
- * handler, the Spring bean offers simple configuration of the associated loggers that share the ATOM push-style handler.
- * <p>
- * General rules:
- * <ul>
- * <li>When {@link #setDeliverer(Deliverer) deliverer} property is not set explicitly, URL must be set to
- * create default deliverer.</li>
- * <li>When {@link #setConverter(Converter) converter} property is not set explicitly, default converter is
- * created.</li>
- * <li>When {@link #setLoggers(String) loggers} property is used, it overrides pair of
- * {@link #setLogger(String) logger} and {@link #setLevel(String) level} properties; and vice versa.</li>
- * <li>When logger is not set, handler is attached to root logger (named ""); when level is not set for
- * logger, default "INFO" level is used.</li>
- * <li>When {@link #setBatchSize(String) batchSize} property is not set or set to wrong value, default batch
- * size of "1" is used.</li>
- * <li>When deliverer property is NOT set, use of "retryXxx" properties causes creation of retrying default
- * deliverer.</li>
- * </ul>
- * Examples:
- * <p>
- * ATOM push handler registered with the root logger for all levels of log events, pushing one feed per event
- * to the specified URL, using default delivery and conversion methods:
- * 
- * <pre>
- *   &lt;bean class=&quot;org.apache.cxf.jaxrs.ext.logging.atom.AtomPushBean&quot; 
- *     init-method=&quot;init&quot;&gt;
- *       &lt;property name=&quot;url&quot; value=&quot;http://localhost:9080/feed&quot;/&gt;
- *       &lt;property name=&quot;level&quot; value=&quot;ALL&quot; /&gt;
- *   &lt;/bean&gt;
- * </pre>
- * 
- * ATOM push handler registered with multiple loggers and listening for different levels (see
- * {@link #setLoggers(String) loggers} property description for syntax details). A custom deliverer will take
- * care of the feeds, each of which carries a batch of 10 log events:
- * 
- * <pre>
- *   &lt;bean id=&quot;soapDeliverer&quot; ...
- *   ...
- *   &lt;bean class=&quot;org.apache.cxf.jaxrs.ext.logging.atom.AtomPushBean&quot; 
- *     init-method=&quot;init&quot;&gt;
- *       &lt;property name=&quot;deliverer&quot;&gt;
- *           &lt;ref bean=&quot;soapDeliverer&quot;/&gt;
- *       &lt;/property&gt;
- *       &lt;property name=&quot;loggers&quot; value=&quot;
- *           org.apache.cxf:DEBUG,
- *           org.apache.cxf.jaxrs,
- *           org.apache.cxf.bus:ERROR&quot; /&gt;
- *       &lt;property name=&quot;batchSize&quot; value=&quot;10&quot; /&gt;
- *   &lt;/bean&gt;
- * </pre>
- */
-public final class AtomPushBean extends AbstractAtomBean {
-
-    private AtomPushEngineConfigurator conf = new AtomPushEngineConfigurator();
-    
-    /**
-     * Creates an unconfigured and uninitialized bean. To configure it, the setters must be used, then
-     * {@link #init()} must be called.
-     */
-    public AtomPushBean() {
-    }
-
-    /**
-     * Set the URL used when a custom deliverer is not set (the default deliverer is then created).
-     */
-    public void setUrl(String url) {
-        checkInit();
-        Validate.notNull(url, "url is null");
-        conf.setUrl(url);
-    }
-
-    /**
-     * Set initialized deliverer.
-     */
-    public void setDeliverer(Deliverer deliverer) {
-        checkInit();
-        Validate.notNull(deliverer, "deliverer is null");
-        conf.setDeliverer(deliverer);
-    }
-
-    /**
-     * Set initialized converter.
-     */
-    public void setConverter(Converter converter) {
-        checkInit();
-        Validate.notNull(converter, "converter is null");
-        conf.setConverter(converter);
-    }
-    
-    /**
-     * Size of the batch; an empty string selects the default one-element batch.
-     */
-    public void setBatchSize(String batchSize) {
-        checkInit();
-        Validate.notNull(batchSize, "batchSize is null");
-        conf.setBatchSize(batchSize);
-    }
-    
-    /**
-     * Batch cleanup time in minutes
-     */
-    public void setBatchCleanupTime(String batchCleanupTime) {
-        checkInit();
-        Validate.notNull(batchCleanupTime, "batchCleanup is null");
-        conf.setBatchCleanupTime(batchCleanupTime);
-    }
-
-    /**
-     * Retry pause calculation strategy, either "linear" or "exponential".
-     */
-    public void setRetryPause(String retryPause) {
-        checkInit();
-        Validate.notNull(retryPause, "retryPause is null");
-        conf.setRetryPause(retryPause);
-    }
-
-    /**
-     * Retry pause time (in seconds).
-     */
-    public void setRetryPauseTime(String time) {
-        checkInit();
-        Validate.notNull(time, "time is null");
-        conf.setRetryPauseTime(time);
-    }
-
-    /**
-     * Retry timeout (in seconds).
-     */
-    public void setRetryTimeout(String timeout) {
-        checkInit();
-        Validate.notNull(timeout, "timeout is null");
-        conf.setRetryTimeout(timeout);
-    }
-
-    /**
-     * Conversion output type: "feed" or "entry".
-     */
-    public void setOutput(String output) {
-        checkInit();
-        Validate.notNull(output, "output is null");
-        conf.setOutput(output);
-    }
-
-    /**
-     * Multiplicity of subelement of output: "one" or "many".
-     */
-    public void setMultiplicity(String multiplicity) {
-        checkInit();
-        Validate.notNull(multiplicity, "multiplicity is null");
-        conf.setMultiplicity(multiplicity);
-    }
-
-    /**
-     * Entry data format: "content" or "extension".
-     */
-    public void setFormat(String format) {
-        checkInit();
-        Validate.notNull(format, "format is null");
-        conf.setFormat(format);
-    }
-
-    protected Handler createHandler() {
-        return new AtomPushHandler(conf.createEngine());
-    }
-
-}
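
The Javadoc above only shows Spring XML; the same bean can be configured programmatically as well.
A minimal sketch, assuming setLoggers() referenced in the Javadoc is inherited from
AbstractAtomBean (not shown in this diff) and using an illustrative feed URL:

    import org.apache.cxf.management.web.logging.atom.AtomPushBean;

    public final class AtomPushSetup {
        public static void main(String[] args) {
            AtomPushBean bean = new AtomPushBean();
            bean.setUrl("http://localhost:9080/feed");     // default WebClientDeliverer is created for this URL
            bean.setLoggers("org.apache.cxf:DEBUG,"
                            + "org.apache.cxf.bus:ERROR"); // same syntax as in the Spring example
            bean.setBatchSize("10");                       // one feed per 10 log records
            bean.setRetryPause("linear");                  // wraps the default deliverer in a RetryingDeliverer
            bean.setRetryPauseTime("10");
            bean.init();                                   // registers the JUL handler with the configured loggers
        }
    }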

http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngine.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngine.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngine.java
deleted file mode 100644
index 3069a73..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngine.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.abdera.model.Element;
-import org.apache.commons.lang3.Validate;
-import org.apache.cxf.management.web.logging.LogRecord;
-import org.apache.cxf.management.web.logging.atom.converter.Converter;
-import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;
-
-/**
- * Package-private ATOM push-style engine. The engine enqueues log records as they are {@link #publish(LogRecord)
- * published}. Once the queue size reaches the {@link #getBatchSize() batch size}, processing of a batch of these
- * records is triggered.
- * <p>
- * Processing is done in a separate thread so as not to block the publishing interface. Processing has two steps:
- * first the list of log records is transformed by the {@link Converter converter} into an ATOM
- * {@link Element element}, and then it is pushed out to the client by the {@link Deliverer deliverer}. Besides
- * transport, the deliverer is indirectly responsible for marshaling the ATOM element to XML.
- * <p>
- * Processing is done by a single-threaded {@link java.util.concurrent.Executor executor}; the next batch of records
- * is taken from the queue only when the currently processed batch finishes and the queue has enough elements to proceed.
- * <p>
- * The first failure of any delivery shuts the engine down. To avoid this situation the engine must have a reliable
- * deliverer registered or use the wrapping
- * {@link org.apache.cxf.jaxrs.ext.logging.atom.deliverer.RetryingDeliverer}.
- */
-// TODO add internal diagnostics - log messages somewhere except for logger :D
-final class AtomPushEngine {
-    private List<LogRecord> queue = new ArrayList<LogRecord>();
-    private ExecutorService executor = Executors.newSingleThreadExecutor();
-    private int batchSize = 1;
-    private int batchTime;
-    private Converter converter;
-    private Deliverer deliverer;
-    private Timer timer;
-    
-    /**
-     * Puts a record into the publishing queue. The engine accepts published records only if it is in a proper
-     * state - properly configured (deliverer and converter registered) and not shut down; otherwise calls to
-     * publish are ignored.
-     * 
-     * @param record record to be published.
-     */
-    public synchronized void publish(LogRecord record) {
-        Validate.notNull(record, "record is null");
-        if (isValid()) {
-            if (batchSize > 1 && batchTime > 0 && timer == null) {
-                createTimerTask((long)batchTime * 60L * 1000L);
-            }
-            queue.add(record);
-            if (queue.size() >= batchSize) {
-                publishAndReset();
-            }
-        } else {
-            handleUndeliveredRecords(Collections.singletonList(record), 
-                                     deliverer == null ? "" : deliverer.getEndpointAddress());
-        }
-    }
-    
-    protected synchronized void publishAndReset() {
-        publishBatch(queue, deliverer, converter);
-        queue = new ArrayList<LogRecord>();
-    }
-
-    /**
-     * Shuts engine down.
-     */
-    public synchronized void shutdown() {
-        cancelTimerTask();
-        if (isValid() && queue.size() > 0) {
-            publishAndReset();
-        }
-        executor.shutdown();
-        
-        try {
-            //wait a little to try and flush the batches
-            //it's not critical, but can avoid errors on the 
-            //console and such which could be confusing
-            executor.awaitTermination(20, TimeUnit.SECONDS);
-        } catch (InterruptedException e) {
-            //ignore
-        }
-    }
-
-    private boolean isValid() {
-        if (deliverer == null) {
-            // TODO report cause
-            ///System.err.println("deliverer is not set");
-            return false;
-        }
-        if (converter == null) {
-            //System.err.println("converter is not set");
-            return false;
-        }
-        return !executor.isShutdown();
-    }
-
-    private void publishBatch(final List<LogRecord> batch,
-                              final Deliverer d,
-                              final Converter c) {
-        executor.execute(new Runnable() {
-            public void run() {
-                try {
-                    LoggingThread.markSilent(true);
-                    List<? extends Element> elements = c.convert(batch);
-                    for (int i = 0; i < elements.size(); i++) {
-                        Element element = elements.get(i);
-                        if (!d.deliver(element)) {
-                            System.err.println("Delivery to " + d.getEndpointAddress() 
-                                + " failed, shutting engine down");
-                            List<LogRecord> undelivered = null;
-                            if (i == 0) {
-                                undelivered = batch;
-                            } else {
-                                int index = (batch.size() / elements.size()) * i;
-                                // should not happen but just in case :-)
-                                if (index < batch.size()) {
-                                    undelivered = batch.subList(index, batch.size());
-                                }
-                            }
-                            handleUndeliveredRecords(undelivered, d.getEndpointAddress());
-                            shutdown();
-                            break;
-                        }
-                    }
-                } catch (InterruptedException e) {
-                    // no action
-                } finally {
-                    LoggingThread.markSilent(false);
-                }
-            }
-        });
-    }
-
-    protected void handleUndeliveredRecords(List<LogRecord> records, String address) {
-        // TODO : save them to some transient storage perhaps ?
-        System.err.println("The following records have been undelivered to " + address + " : ");
-        for (LogRecord r : records) {
-            System.err.println(r.toString());
-        }
-    }
-    
-    public int getBatchSize() {
-        return batchSize;
-    }
-
-    public void setBatchSize(int batchSize) {
-        Validate.isTrue(batchSize > 0, "batch size is not greater than zero");
-        this.batchSize = batchSize;
-    }
-    
-    public void setBatchTime(int batchTime) {
-        this.batchTime = batchTime;
-    }
-
-    /**
-     * Creates a timer task which will flush the batch queue after the given timeout,
-     * thus ensuring log records won't become too 'stale'. 
-     * For example, with a batch size of 10 and only WARN records to be delivered,
-     * without this cleanup the consumers might not get prompt notifications.
-     *  
-     * @param timeout
-     */
-    protected void createTimerTask(long timeout) {
-        timer = new Timer();
-        timer.schedule(new TimerTask() {
-            public void run() {
-                publishAndReset();
-            }
-        }, timeout);
-    }
-    
-    protected void cancelTimerTask() {
-        if (timer != null) {
-            timer.cancel();
-            timer = null;
-        }
-    }
-    
-    public synchronized Converter getConverter() {
-        return converter;
-    }
-
-    public synchronized void setConverter(Converter converter) {
-        Validate.notNull(converter, "converter is null");
-        this.converter = converter;
-    }
-
-    public synchronized Deliverer getDeliverer() {
-        return deliverer;
-    }
-
-    public synchronized void setDeliverer(Deliverer deliverer) {
-        Validate.notNull(deliverer, "deliverer is null");
-        this.deliverer = deliverer;
-    }
-}
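
The engine treats the Deliverer as its only transport abstraction: deliver() returning false makes
it hand back the undelivered records and shut down. A minimal custom deliverer sketch, assuming the
Deliverer interface declares exactly the two methods the engine calls above (deliver(Element) and
getEndpointAddress()); a real implementation would push the element over the wire instead of
printing it:

    import java.io.IOException;

    import org.apache.abdera.model.Element;
    import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;

    // Writes each converted ATOM element to stdout.
    public class ConsoleDeliverer implements Deliverer {

        public boolean deliver(Element element) {
            try {
                element.writeTo(System.out);  // Abdera serializes the element as XML
                System.out.println();
                return true;                  // success keeps the engine running
            } catch (IOException e) {
                return false;                 // a false return shuts the engine down (see publishBatch above)
            }
        }

        public String getEndpointAddress() {
            return "console";                 // only used in the engine's diagnostic messages
        }
    }

Such a class can be plugged in through AtomPushBean.setDeliverer() or the handler's "deliverer"
property described in AtomPushHandler.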

http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngineConfigurator.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngineConfigurator.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngineConfigurator.java
deleted file mode 100644
index 78a8414..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushEngineConfigurator.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom;
-
-import java.lang.reflect.Constructor;
-
-import org.apache.cxf.management.web.logging.atom.converter.Converter;
-import org.apache.cxf.management.web.logging.atom.converter.StandardConverter;
-import org.apache.cxf.management.web.logging.atom.converter.StandardConverter.Format;
-import org.apache.cxf.management.web.logging.atom.converter.StandardConverter.Multiplicity;
-import org.apache.cxf.management.web.logging.atom.converter.StandardConverter.Output;
-import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;
-import org.apache.cxf.management.web.logging.atom.deliverer.RetryingDeliverer;
-import org.apache.cxf.management.web.logging.atom.deliverer.WebClientDeliverer;
-
-/**
- * Package-private interpreter of possibly incomplete engine configuration input. Used by both the
- * {@link AtomPushHandler properties file} and {@link AtomPushBean Spring} configuration schemes.
- */
-// TODO extract 'general rules' of interpretation in handler and bean and put here
-final class AtomPushEngineConfigurator {
-
-    private Deliverer deliverer;
-    private Converter converter;
-    private String delivererClass;
-    private String converterClass;
-    private String batchSize;
-    private String batchCleanupTime;
-    private String delivererUrl;
-    private String retryTimeout;
-    private String retryPause;
-    private String retryPauseTime;
-    private String output;
-    private String multiplicity;
-    private String format;
-
-    public void setUrl(String url) {
-        this.delivererUrl = url;
-    }
-
-    public void setRetryTimeout(String retryTimeout) {
-        this.retryTimeout = retryTimeout;
-    }
-
-    public void setRetryPause(String retryPause) {
-        this.retryPause = retryPause;
-    }
-
-    public void setRetryPauseTime(String retryPauseTime) {
-        this.retryPauseTime = retryPauseTime;
-    }
-
-    public void setBatchCleanupTime(String cleanupTime) {
-        this.batchCleanupTime = cleanupTime;
-    }
-    
-    public void setBatchSize(String batchSize) {
-        this.batchSize = batchSize;
-    }
-
-    public void setDeliverer(Deliverer deliverer) {
-        this.deliverer = deliverer;
-    }
-
-    public void setConverter(Converter converter) {
-        this.converter = converter;
-    }
-
-    public void setDelivererClass(String delivererClass) {
-        this.delivererClass = delivererClass;
-    }
-
-    public void setConverterClass(String converterClass) {
-        this.converterClass = converterClass;
-    }
-
-    public void setOutput(String output) {
-        this.output = output;
-    }
-
-    public void setMultiplicity(String multiplicity) {
-        this.multiplicity = multiplicity;
-    }
-
-    public void setFormat(String format) {
-        this.format = format;
-    }
-
-    public AtomPushEngine createEngine() {
-        Deliverer d = deliverer;
-        Converter c = converter;
-        int batch = parseInt(batchSize, 1, 1);
-        int batchTime = parseInt(batchCleanupTime, 0);
-        if (d == null) {
-            if (delivererUrl != null) {
-                if (delivererClass != null) {
-                    d = createDeliverer(delivererClass, delivererUrl);
-                } else {
-                    d = new WebClientDeliverer(delivererUrl);
-                }
-            } else {
-                throw new IllegalStateException("Either url, deliverer or "
-                                                + "deliverer class with url must be set up");
-            }
-        }
-        if (c == null) {
-            if (converterClass != null) {
-                c = createConverter(converterClass);
-            } else {
-                Output out = parseEnum(output, Output.FEED, Output.class);
-                Multiplicity defaultMul = out == Output.FEED ? Multiplicity.MANY
-                    : batch > 1 ? Multiplicity.MANY : Multiplicity.ONE; 
-                Multiplicity mul = parseEnum(multiplicity, defaultMul, Multiplicity.class);
-                Format form = parseEnum(format, Format.CONTENT, Format.class);
-                c = new StandardConverter(out, mul, form);
-                
-                if (retryPause != null) {
-                    int timeout = parseInt(retryTimeout, 0, 0);
-                    int pause = parseInt(retryPauseTime, 1, 30);
-                    boolean linear = !retryPause.equalsIgnoreCase("exponential");
-                    d = new RetryingDeliverer(d, timeout, pause, linear);
-                }
-            }
-        }
-        AtomPushEngine engine = new AtomPushEngine();
-        engine.setDeliverer(d);
-        engine.setConverter(c);
-        engine.setBatchSize(batch);
-        engine.setBatchTime(batchTime);
-        return engine;
-    }
-
-    private Deliverer createDeliverer(String clazz, String url) {
-        try {
-            Constructor<Deliverer> ctor = loadClass(clazz, Deliverer.class).getConstructor(String.class);
-            return ctor.newInstance(url);
-        } catch (Exception e) {
-            throw new IllegalArgumentException(e);
-        }
-    }
-
-    private Converter createConverter(String clazz) {
-        try {
-            Constructor<Converter> ctor = loadClass(clazz, Converter.class).getConstructor();
-            return ctor.newInstance();
-        } catch (Exception e) {
-            throw new IllegalArgumentException(e);
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    private <T> Class<T> loadClass(String clazz, Class<T> ifaceClass) throws ClassNotFoundException {
-        ClassLoader cl = Thread.currentThread().getContextClassLoader();
-        try {
-            return (Class<T>)cl.loadClass(clazz);
-        } catch (ClassNotFoundException e) {
-            try {
-                // clazz could be shortened (package name stripped); retry in the interface's package
-                String pkg = ifaceClass.getPackage().getName();
-                String clazz2 = pkg + "." + clazz;
-                return (Class<T>)cl.loadClass(clazz2);
-            } catch (Exception e1) {
-                throw new ClassNotFoundException(e.getMessage() + " or " + e1.getMessage());
-            }
-        }
-    }
-
-    private int parseInt(String property, int defaultValue) {
-        try {
-            return Integer.parseInt(property);
-        } catch (NumberFormatException e) {
-            return defaultValue;
-        }
-    }
-
-    private int parseInt(String property, int lessThan, int defaultValue) {
-        int ret = parseInt(property, defaultValue);
-        if (ret < lessThan) {
-            ret = defaultValue;
-        }
-        return ret;
-    }
-
-    private <T extends Enum<T>> T parseEnum(String value, T defaultValue, Class<T> enumClass) {
-        if (value == null || "".equals(value)) {
-            return defaultValue;
-        }
-        try {
-            return Enum.valueOf(enumClass, value.toUpperCase());
-        } catch (Exception e) {
-            return defaultValue;
-        }
-    }
-}
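
createConverter() above requires a converter class to have a no-argument constructor, and
loadClass() lets the package name be dropped when the class lives next to the Converter interface.
A minimal custom converter sketch, assuming Converter's single method has the
List<? extends Element> convert(List<LogRecord>) signature suggested by the call in AtomPushEngine;
it turns each log record into a bare Abdera entry:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.abdera.Abdera;
    import org.apache.abdera.model.Element;
    import org.apache.abdera.model.Entry;
    import org.apache.cxf.management.web.logging.LogRecord;
    import org.apache.cxf.management.web.logging.atom.converter.Converter;

    // Needs a no-arg constructor so AtomPushEngineConfigurator.createConverter() can instantiate it.
    public class PlainEntryConverter implements Converter {

        private final Abdera abdera = new Abdera();

        public List<? extends Element> convert(List<LogRecord> records) {
            List<Entry> entries = new ArrayList<Entry>();
            for (LogRecord record : records) {
                Entry entry = abdera.getFactory().newEntry();
                entry.setTitle(record.getLevel() + " from " + record.getLoggerName());
                entry.setContent(record.getMessage());
                entries.add(entry);
            }
            return entries;
        }
    }

It can be registered through the "converter" property of AtomPushHandler, either by its fully
qualified name or by the bare class name if it is placed in the converter package.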

http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushHandler.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushHandler.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushHandler.java
deleted file mode 100644
index 7bd6869..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushHandler.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom;
-
-import java.util.logging.Handler;
-import java.util.logging.LogManager;
-
-import org.apache.cxf.management.web.logging.LogRecord;
-import org.apache.cxf.management.web.logging.atom.converter.Converter;
-import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;
-
-/**
- * Handler pushing log records in batches as Atom Feeds or Entries to a registered client. The handler's
- * responsibility is to adapt to the JUL framework while most of the job is delegated to {@link AtomPushEngine}.
- * <p>
- * For simple configuration using a properties file (one global root-level handler of this class) the following
- * properties, prefixed with the full name of this class, can be used:
- * <ul>
- * <li><b>url</b> - URL where feeds will be pushed (mandatory parameter)</li>
- * <li><b>batchSize</b> - integer specifying the minimal number of published log records that triggers
- * processing and pushing of an ATOM document. If the parameter is not set, is not greater than zero or is not a
- * number, the batch size is set to 1.</li>
- * </ul>
- * Conversion of log records into ATOM Elements can be tuned using the following parameters. Note that not all
- * combinations are meaningful; see {@link org.apache.cxf.jaxrs.ext.logging.atom.converter.StandardConverter}
- * for details:
- * <ul>
- * <li><b>output</b> - ATOM Element type pushed out, either "feed" or "entry"; when not specified or invalid
- * value provided "feed" is used.</li>
- * <li><b>multiplicity</b> - multiplicity of subelements (entries in feed for output=="feed" or log records in
- * entry for output=="entry"), either "one" or "many"; when not specified or invalid value provided "one" is
- * used.</li>
- * <li><b>format</b> - method of embedding data in entry, either "content" or "extension"; when not specified
- * or invalid value provided "content" is used.</li>
- * </ul>
- * By default delivery is served by WebClientDeliverer, which does not provide reliable transport.
- * Setting any of the following parameters enables retrying of the default delivery. For a detailed explanation of
- * these parameters, see the {@link org.apache.cxf.jaxrs.ext.logging.atom.deliverer.RetryingDeliverer} class description.
- * <ul>
- * <li><b>retry.pause</b> - pausing strategy of delivery retries, either <b>linear</b> or <b>exponential</b>
- * value (mandatory parameter). If misspelled, linear is used.</li>
- * <li><b>retry.pause.time</b> - pause time (in seconds) between retries. If parameter is not set, pause is
- * set to 30 seconds.</li>
- * <li><b>retry.timeout</b> - maximum time (in seconds) for which retrying will be continued. If not set, there is
- * no timeout (retries continue indefinitely).</li>
- * </ul>
- * Ultimate control over conversion and delivery is obtained by specifying your own implementation classes:
- * <ul>
- * <li><b>converter</b> - name of a class implementing the {@link Converter} interface, replacing the default
- * conversion; its specific parameters ("output", "multiplicity" and "format") are then ignored. For classes located
- * in the same package as the Converter interface only the class name needs to be given, e.g. instead of
- * "org.apache.cxf.jaxrs.ext.logging.atom.converter.FooBarConverter" one can specify "FooBarConverter".</li>
- * <li><b>deliverer</b> - name of a class implementing the {@link Deliverer} interface, replacing the default
- * delivery; its specific parameters ("retry.Xxx") are then ignored. For classes located in the same package as the
- * Deliverer interface only the class name needs to be given, e.g. instead of
- * "org.apache.cxf.jaxrs.ext.logging.atom.deliverer.WebClientDeliverer" one can specify 
- * "WebClientDeliverer".</li>
- * </ul>
- * Example:
- * 
- * <pre>
- * handlers = org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler, java.util.logging.ConsoleHandler
- * .level = INFO
- * 
- * # deliver to given URL triggering after each batch of 10 log records
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.url = http://localhost:9080
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.batchSize = 10
- * 
- * # enable retrying delivery every 10 seconds for 5 minutes
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.retry.pause = linear
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.retry.pause.time = 10
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.retry.timeout = 300
- * 
- * # output for AtomPub: push entries not feeds, each entry with one log record as &quot;atom:extension&quot; 
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.output = entry
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.multiplicity = one
- * org.apache.cxf.jaxrs.ext.logging.atom.AtomPushHandler.format = extension
- * ...
- * </pre>
- */
-public final class AtomPushHandler extends Handler {
-
-    private AtomPushEngine engine;
-    private boolean lazyConfig;
-
-    /**
-     * Creates a handler with configuration taken from the properties file.
-     */
-    public AtomPushHandler() {
-        // deferred configuration: calling configure() from here would use utilities that attempt to log
-        // and would recursively create this handler instance; configure() is called on the first publish()
-        lazyConfig = true;
-    }
-
-    /**
-     * Creates handler with custom parameters.
-     * 
-     * @param batchSize batch size, see {@link AtomPushEngine#getBatchSize()}
-     * @param converter converter transforming logs into ATOM elements
-     * @param deliverer deliverer pushing ATOM elements to client
-     */
-    public AtomPushHandler(int batchSize, Converter converter, Deliverer deliverer) {
-        engine = new AtomPushEngine();
-        engine.setBatchSize(batchSize);
-        engine.setConverter(converter);
-        engine.setDeliverer(deliverer);
-    }
-
-    /**
-     * Creates a handler using the given engine (package private).
-     * 
-     * @param engine configured engine.
-     */
-    AtomPushHandler(AtomPushEngine engine) {
-        this.engine = engine;
-    }
-
-    @Override
-    public synchronized void publish(java.util.logging.LogRecord record) {
-        if (LoggingThread.isSilent()) {
-            return;
-        }
-        LoggingThread.markSilent(true);
-        try {
-            if (lazyConfig) {
-                lazyConfig = false;
-                configure();
-            }
-            if (engine == null) {
-                return;
-            }
-            LogRecord rec = LogRecord.fromJUL(record);
-            engine.publish(rec);
-        } finally {
-            LoggingThread.markSilent(false);
-        }
-    }
-
-    @Override
-    public synchronized void close() throws SecurityException {
-        if (engine != null) {
-            engine.shutdown();
-        }
-        engine = null;
-    }
-
-    @Override
-    public synchronized void flush() {
-        // no-op
-    }
-
-    /**
-     * Configuration from properties. Aligned with the JUL strategy: the properties file supports only simple
-     * configuration, allowing one root handler to be configured with its parameters. Worse still, JUL does
-     * not allow iterating over the configuration properties, which would have made automated interpretation
-     * possible (e.g. using commons-beanutils).
-     */
-    private void configure() {
-        LogManager manager = LogManager.getLogManager();
-        String cname = getClass().getName();
-        AtomPushEngineConfigurator conf = new AtomPushEngineConfigurator();
-        conf.setUrl(manager.getProperty(cname + ".url"));
-        conf.setDelivererClass(manager.getProperty(cname + ".deliverer"));
-        conf.setConverterClass(manager.getProperty(cname + ".converter"));
-        conf.setBatchSize(manager.getProperty(cname + ".batchSize"));
-        conf.setBatchCleanupTime(manager.getProperty(cname + ".batchCleanupTime"));
-        conf.setRetryPause(manager.getProperty(cname + ".retry.pause"));
-        conf.setRetryPauseTime(manager.getProperty(cname + ".retry.pause.time"));
-        conf.setRetryTimeout(manager.getProperty(cname + ".retry.timeout"));
-        conf.setOutput(manager.getProperty(cname + ".output"));
-        conf.setMultiplicity(manager.getProperty(cname + ".multiplicity"));
-        conf.setFormat(manager.getProperty(cname + ".format"));
-        engine = conf.createEngine();
-    }
-}

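A minimal sketch of how the three-argument constructor above could be wired up programmatically instead of
through a logging.properties file. The "myConverter" and "myDeliverer" arguments are placeholders for any
concrete Converter and Deliverer implementations, and the class name AtomPushHandlerWiringSketch is
hypothetical.

    import java.util.logging.Logger;

    import org.apache.cxf.management.web.logging.atom.AtomPushHandler;
    import org.apache.cxf.management.web.logging.atom.converter.Converter;
    import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;

    public final class AtomPushHandlerWiringSketch {

        public static void register(Converter myConverter, Deliverer myDeliverer) {
            // batch size of 10: the engine pushes one ATOM document per 10 published log records
            AtomPushHandler handler = new AtomPushHandler(10, myConverter, myDeliverer);
            // attach to the root logger, mirroring the properties-file example in the Javadoc above
            Logger.getLogger("").addHandler(handler);
        }
    }
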
http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushOverWebSocketServer.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushOverWebSocketServer.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushOverWebSocketServer.java
deleted file mode 100644
index 11f04df..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/AtomPushOverWebSocketServer.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.logging.Handler;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.HeaderParam;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-
-import org.apache.abdera.model.Element;
-import org.apache.abdera.model.Feed;
-import org.apache.commons.lang3.Validate;
-import org.apache.cxf.jaxrs.ext.StreamingResponse;
-import org.apache.cxf.management.web.logging.atom.converter.Converter;
-import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;
-
-/**
- * Bean used to configure the {@link AtomPushHandler JUL handler} with Spring instead of a properties file.
- * See the {@link AtomPushHandler} class for a detailed description of the parameters. In addition to
- * configuring the handler, this Spring bean offers simple configuration of the associated loggers that share
- * the ATOM push-style handler.
- * <p>
- * General rules:
- * <ul>
- * <li>When the {@link #setConverter(Converter) converter} property is not set explicitly, a default
- * converter is created.</li>
- * <li>When the {@link #setLoggers(String) loggers} property is used, it overrides the pair of
- * {@link #setLogger(String) logger} and {@link #setLevel(String) level} properties, and vice versa.</li>
- * <li>When the logger is not set, the handler is attached to the root logger (named ""); when the level is
- * not set for a logger, the default "INFO" level is used.</li>
- * <li>When the {@link #setBatchSize(String) batchSize} property is not set or is set to an invalid value,
- * the default batch size of "1" is used.</li>
- * <li>When the deliverer property is NOT set, use of the "retryXxx" properties causes a retrying default
- * deliverer to be created.</li>
- * </ul>
- * Examples:
- * <p>
- * ATOM push handler registered with the root logger for all levels of log events, pushing one feed per event
- * over the connected websocket, using default conversion methods:
- * 
- * <pre>
- *   &lt;bean class=&quot;org.apache.cxf.jaxrs.ext.logging.atom.AtomPushOverWebSocketServer&quot; 
- *     init-method=&quot;init&quot;&gt;
- *       &lt;property name=&quot;level&quot; value=&quot;ALL&quot; /&gt;
- *   &lt;/bean&gt;
- * </pre>
- * 
- * ATOM push handler registered with multiple loggers and listening for different levels (see the
- * {@link #setLoggers(String) loggers} property description for syntax details). A custom deliverer will take
- * care of the feeds, each of which carries a batch of 10 log events:
- * 
- * <pre>
- *   ...
- *   &lt;bean class=&quot;org.apache.cxf.jaxrs.ext.logging.atom.AtomPushOverWebSocketServer&quot; 
- *     init-method=&quot;init&quot;&gt;
- *       &lt;property name=&quot;loggers&quot; value=&quot;
- *           org.apache.cxf:DEBUG,
- *           org.apache.cxf.jaxrs,
- *           org.apache.cxf.bus:ERROR&quot; /&gt;
- *       &lt;property name=&quot;batchSize&quot; value=&quot;10&quot; /&gt;
- *   &lt;/bean&gt;
- * </pre>
- */
-//REVISIT we will move the common part into AbstractAtomPushBean so that it can be shared by both AtomPushBean and this
-@Path("/logs2")
-public final class AtomPushOverWebSocketServer extends AbstractAtomBean {
-    private AtomPushEngineConfigurator conf = new AtomPushEngineConfigurator();
-    private Map<String, Object> activeStreams;
-
-    /**
-     * Creates an unconfigured and uninitialized bean. To configure it, the setters must be used, then
-     * {@link #init()} must be called.
-     */
-    public AtomPushOverWebSocketServer() {
-        conf.setDeliverer(new WebSocketDeliverer());
-    }
-
-    @Override
-    public void init() {
-        super.init();
-        activeStreams = Collections.synchronizedMap(new HashMap<String, Object>());
-    }
-
-    /**
-     * Set initialized converter.
-     */
-    public void setConverter(Converter converter) {
-        checkInit();
-        Validate.notNull(converter, "converter is null");
-        conf.setConverter(converter);
-    }
-    
-    /**
-     * Size of the batch; an empty string selects the default one-element batch.
-     */
-    public void setBatchSize(String batchSize) {
-        checkInit();
-        Validate.notNull(batchSize, "batchSize is null");
-        conf.setBatchSize(batchSize);
-    }
-    
-    /**
-     * Batch cleanup time (in minutes).
-     */
-    public void setBatchCleanupTime(String batchCleanupTime) {
-        checkInit();
-        Validate.notNull(batchCleanupTime, "batchCleanupTime is null");
-        conf.setBatchCleanupTime(batchCleanupTime);
-    }
-
-    /**
-     * Retry pause calculation strategy, either "linear" or "exponential".
-     */
-    public void setRetryPause(String retryPause) {
-        checkInit();
-        Validate.notNull(retryPause, "retryPause is null");
-        conf.setRetryPause(retryPause);
-    }
-
-    /**
-     * Retry pause time (in seconds).
-     */
-    public void setRetryPauseTime(String time) {
-        checkInit();
-        Validate.notNull(time, "time is null");
-        conf.setRetryPauseTime(time);
-    }
-
-    /**
-     * Retry timeout (in seconds).
-     */
-    public void setRetryTimeout(String timeout) {
-        checkInit();
-        Validate.notNull(timeout, "timeout is null");
-        conf.setRetryTimeout(timeout);
-    }
-
-    /**
-     * Conversion output type: "feed" or "entry".
-     */
-    public void setOutput(String output) {
-        checkInit();
-        Validate.notNull(output, "output is null");
-        conf.setOutput(output);
-    }
-
-    /**
-     * Multiplicity of the subelements of the output: "one" or "many".
-     */
-    public void setMultiplicity(String multiplicity) {
-        checkInit();
-        Validate.notNull(multiplicity, "multiplicity is null");
-        conf.setMultiplicity(multiplicity);
-    }
-
-    /**
-     * Entry data format: "content" or "extension".
-     */
-    public void setFormat(String format) {
-        checkInit();
-        Validate.notNull(format, "format is null");
-        conf.setFormat(format);
-    }
-
-    protected Handler createHandler() {
-        return new AtomPushHandler(conf.createEngine());
-    }
-
-    @GET
-    @Produces("application/atom+xml")
-    @Path("subscribe")
-    public StreamingResponse<Feed> subscribeXmlFeed(@HeaderParam("requestId") String reqid) {
-        final String key = reqid == null ? "*" : reqid;
-        return new StreamingResponse<Feed>() {
-            public void writeTo(final StreamingResponse.Writer<Feed> out) throws IOException {
-                activeStreams.put(key,  out);
-            }
-        };
-    }
-
-    @GET
-    @Produces("text/plain")
-    @Path("unsubscribe/{key}")
-    public Boolean unsubscribeXmlFeed(@PathParam("key") String key) {
-        return activeStreams.remove(key) != null;
-    }
-
-    private class WebSocketDeliverer implements Deliverer {
-
-        @Override
-        public boolean deliver(Element element) throws InterruptedException {
-            if (activeStreams.size() > 0) {
-                for (Iterator<Object> it = activeStreams.values().iterator(); it.hasNext();) {
-                    Object out = it.next();
-                    try {
-                        if (out instanceof StreamingResponse.Writer) {
-                            ((StreamingResponse.Writer)out).write(element);
-                        }
-                    } catch (Throwable t) {
-                        // REVISIT
-                        // the reason for not logging anything here is to not further clog the logger 
-                        // with this log broadcasting failure.
-                        System.err.print("ERROR | AtomPushOverWebSocketServer | " + t + "; Unregistering " + out);
-                        it.remove();
-                    }
-                }
-            }
-
-            return true;
-        }
-
-        @Override
-        public String getEndpointAddress() {
-            //REVISIT return something else?
-            return null;
-        }
-        
-    }
-}

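A minimal sketch, following the shape of the WebSocketDeliverer above, of what a custom Deliverer
implementation looks like: it simply serializes each ATOM element to standard output instead of pushing it to
subscribers. The class name StdoutDeliverer is hypothetical; serialization assumes Abdera's
Element.writeTo(OutputStream).

    import java.io.IOException;

    import org.apache.abdera.model.Element;
    import org.apache.cxf.management.web.logging.atom.deliverer.Deliverer;

    public class StdoutDeliverer implements Deliverer {

        @Override
        public boolean deliver(Element element) throws InterruptedException {
            try {
                element.writeTo(System.out);   // serialize the feed or entry as XML
                return true;                   // report successful delivery
            } catch (IOException e) {
                return false;                  // a retrying deliverer could attempt this again later
            }
        }

        @Override
        public String getEndpointAddress() {
            return null;                       // no remote endpoint for this sketch
        }
    }
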
http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/Hold.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/Hold.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/Hold.java
deleted file mode 100644
index b347e13..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/Hold.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.cxf.management.web.logging.atom;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/*
- * Marker annotation to force the package-info.java to be compiled by javac 
- * to avoid problems with the maven compiler plugin always compiling everything
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target({ ElementType.PACKAGE })
-@interface Hold {
-
-}

http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/LoggingThread.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/LoggingThread.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/LoggingThread.java
deleted file mode 100644
index e8056e6..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/LoggingThread.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom;
-
-/**
- * Helps disable logging from calls made by the same thread. Motivation: the log handlers in this package
- * cause other threads (from the executor) to start logging (by using JAXB, which itself uses JUL), which in
- * turn can be caught by the same handler, leading to an infinite loop.
- * <p>
- * An alternative to thread-local storage would be scanning the stack trace of the current thread to see
- * whether the root of the call comes from the same package as the handler; that is less efficient, so TLS is
- * used here.
- */
-final class LoggingThread {
-
-    private static ThreadLocal<Boolean> threadLocal = new ThreadLocal<Boolean>();
-
-    private LoggingThread() {
-    }
-
-    public static void markSilent(boolean silent) {
-        if (silent) {
-            threadLocal.set(Boolean.TRUE);
-        } else {
-            threadLocal.remove();
-        }
-    }
-
-    public static boolean isSilent() {
-        Boolean b = threadLocal.get();
-        if (b != null) {
-            return b;
-        }
-        return false;
-    }
-}

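A minimal sketch of the guard pattern that handlers in this package follow when using LoggingThread, shown on
a hypothetical handler (it would need to live in the same package, since LoggingThread is package-private).
Any logging triggered on the same thread inside the try block is ignored, which breaks the
handler -> JAXB -> JUL -> handler loop described above.

    import java.util.logging.Handler;
    import java.util.logging.LogRecord;

    public final class ReentrancySafeHandler extends Handler {

        @Override
        public void publish(LogRecord record) {
            if (LoggingThread.isSilent()) {
                return;                       // already inside a publish() on this thread
            }
            LoggingThread.markSilent(true);   // silence re-entrant logging from this thread
            try {
                // ... convert and deliver the record; this may itself emit JUL log records ...
            } finally {
                LoggingThread.markSilent(false);
            }
        }

        @Override
        public void flush() {
            // no-op
        }

        @Override
        public void close() {
            // no-op
        }
    }
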
http://git-wip-us.apache.org/repos/asf/cxf/blob/7f4a24d0/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/converter/Converter.java
----------------------------------------------------------------------
diff --git a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/converter/Converter.java b/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/converter/Converter.java
deleted file mode 100644
index 0966854..0000000
--- a/rt/management-web/src/main/java/org/apache/cxf/management/web/logging/atom/converter/Converter.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cxf.management.web.logging.atom.converter;
-
-import java.util.List;
-
-import org.apache.abdera.model.Element;
-import org.apache.cxf.management.web.logging.LogRecord;
-
-/**
- * Converts a batch of log records into one or more ATOM Elements to deliver.
- */
-public interface Converter {
-
-    /**
-     * Converts the given collection.
-     * 
-     * @param records non-null collection of records
-     * @return non-empty collection of ATOM Elements that represent the log records
-     */
-    List<? extends Element> convert(List<LogRecord> records);
-}
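
A minimal sketch of a Converter implementation that maps each log record to a single ATOM entry using Abdera.
The class name OneEntryPerRecordConverter is hypothetical, and the record's toString() form is used as the
entry content so that no particular LogRecord accessor is assumed; a real converter (see StandardConverter)
would map the record's individual fields.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.abdera.Abdera;
    import org.apache.abdera.model.Element;
    import org.apache.abdera.model.Entry;
    import org.apache.cxf.management.web.logging.LogRecord;
    import org.apache.cxf.management.web.logging.atom.converter.Converter;

    public class OneEntryPerRecordConverter implements Converter {

        private final Abdera abdera = new Abdera();

        @Override
        public List<? extends Element> convert(List<LogRecord> records) {
            List<Entry> entries = new ArrayList<Entry>(records.size());
            for (LogRecord record : records) {
                Entry entry = abdera.newEntry();          // one ATOM entry per log record
                entry.setTitle("Log record");
                entry.setContent(String.valueOf(record)); // plain-text content
                entries.add(entry);
            }
            return entries;
        }
    }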