Posted to commits@cassandra.apache.org by sa...@apache.org on 2020/06/12 10:38:25 UTC

[cassandra] branch cassandra-3.11 updated (e498539 -> 408f969)

This is an automated email from the ASF dual-hosted git repository.

samt pushed a change to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git.


    from e498539  Merge branch 'cassandra-3.0' into cassandra-3.11
     new c8c3c26  Fix nomenclature of deny and allow lists
     new 0a1e8d1  Merge branch 'cassandra-2.2' into cassandra-3.0
     new 408f969  Merge branch 'cassandra-3.0' into cassandra-3.11

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 CHANGES.txt                                        |  1 +
 .../cassandra/auth/jmx/AuthorizationProxy.java     | 47 +++++++++++-----------
 .../cassandra/cql3/functions/UDFunction.java       | 22 +++++-----
 src/java/org/apache/cassandra/db/Directories.java  | 41 +++++++++----------
 ...Directories.java => DisallowedDirectories.java} | 29 ++++++-------
 ...sMBean.java => DisallowedDirectoriesMBean.java} |  2 +-
 .../org/apache/cassandra/db/DiskBoundaries.java    |  2 +-
 .../apache/cassandra/db/DiskBoundaryManager.java   |  6 +--
 .../db/compaction/AbstractCompactionStrategy.java  |  6 +--
 .../cassandra/db/compaction/LeveledManifest.java   |  2 +-
 .../org/apache/cassandra/dht/RangeStreamer.java    | 10 ++---
 .../cassandra/hints/HintsDispatchExecutor.java     |  2 +-
 .../org/apache/cassandra/hints/HintsStore.java     | 10 ++---
 .../cassandra/io/sstable/format/SSTableReader.java |  2 +-
 .../cassandra/service/DefaultFSErrorHandler.java   |  6 +--
 .../apache/cassandra/service/StorageService.java   |  2 +-
 test/unit/org/apache/cassandra/Util.java           |  4 +-
 .../cassandra/auth/jmx/AuthorizationProxyTest.java |  4 +-
 .../org/apache/cassandra/db/DirectoriesTest.java   |  2 +-
 .../cassandra/db/DiskBoundaryManagerTest.java      |  6 +--
 ....java => CorruptedSSTablesCompactionsTest.java} | 17 ++++----
 21 files changed, 112 insertions(+), 111 deletions(-)
 rename src/java/org/apache/cassandra/db/{BlacklistedDirectories.java => DisallowedDirectories.java} (80%)
 rename src/java/org/apache/cassandra/db/{BlacklistedDirectoriesMBean.java => DisallowedDirectoriesMBean.java} (96%)
 rename test/unit/org/apache/cassandra/db/compaction/{BlacklistingCompactionsTest.java => CorruptedSSTablesCompactionsTest.java} (94%)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra] 01/01: Merge branch 'cassandra-3.0' into cassandra-3.11

Posted by sa...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

samt pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 408f969f3c1f6cccdb26f4ef8daec010b87933b5
Merge: e498539 0a1e8d1
Author: Sam Tunnicliffe <sa...@beobal.com>
AuthorDate: Fri Jun 12 11:26:04 2020 +0100

    Merge branch 'cassandra-3.0' into cassandra-3.11

 CHANGES.txt                                        |  1 +
 .../cassandra/auth/jmx/AuthorizationProxy.java     | 47 +++++++++++-----------
 .../cassandra/cql3/functions/UDFunction.java       | 22 +++++-----
 src/java/org/apache/cassandra/db/Directories.java  | 41 +++++++++----------
 ...Directories.java => DisallowedDirectories.java} | 29 ++++++-------
 ...sMBean.java => DisallowedDirectoriesMBean.java} |  2 +-
 .../org/apache/cassandra/db/DiskBoundaries.java    |  2 +-
 .../apache/cassandra/db/DiskBoundaryManager.java   |  6 +--
 .../db/compaction/AbstractCompactionStrategy.java  |  6 +--
 .../cassandra/db/compaction/LeveledManifest.java   |  2 +-
 .../org/apache/cassandra/dht/RangeStreamer.java    | 10 ++---
 .../cassandra/hints/HintsDispatchExecutor.java     |  2 +-
 .../org/apache/cassandra/hints/HintsStore.java     | 10 ++---
 .../cassandra/io/sstable/format/SSTableReader.java |  2 +-
 .../cassandra/service/DefaultFSErrorHandler.java   |  6 +--
 .../apache/cassandra/service/StorageService.java   |  2 +-
 test/unit/org/apache/cassandra/Util.java           |  4 +-
 .../cassandra/auth/jmx/AuthorizationProxyTest.java |  4 +-
 .../org/apache/cassandra/db/DirectoriesTest.java   |  2 +-
 .../cassandra/db/DiskBoundaryManagerTest.java      |  6 +--
 ....java => CorruptedSSTablesCompactionsTest.java} | 17 ++++----
 21 files changed, 112 insertions(+), 111 deletions(-)

diff --cc src/java/org/apache/cassandra/auth/jmx/AuthorizationProxy.java
index 7bfbf52,0000000..ebc1763
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/auth/jmx/AuthorizationProxy.java
+++ b/src/java/org/apache/cassandra/auth/jmx/AuthorizationProxy.java
@@@ -1,494 -1,0 +1,493 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.auth.jmx;
 +
 +import java.lang.reflect.*;
 +import java.security.AccessControlContext;
 +import java.security.AccessController;
 +import java.security.Principal;
 +import java.util.Set;
 +import java.util.function.Function;
 +import java.util.function.Supplier;
 +import java.util.stream.Collectors;
 +import javax.management.MBeanServer;
 +import javax.management.MalformedObjectNameException;
 +import javax.management.ObjectName;
 +import javax.security.auth.Subject;
 +
 +import com.google.common.annotations.VisibleForTesting;
- import com.google.common.base.Throwables;
 +import com.google.common.collect.ImmutableSet;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import org.apache.cassandra.auth.*;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.service.StorageService;
 +
 +/**
 + * Provides a proxy interface to the platform's MBeanServer instance to perform
 + * role-based authorization on method invocation.
 + *
 + * When used in conjunction with a suitable JMXAuthenticator, which attaches a CassandraPrincipal
 + * to authenticated Subjects, this class uses the configured IAuthorizer to verify that the
 + * subject has the required permissions to execute methods on the MBeanServer and the MBeans it
 + * manages.
 + *
 + * Because an ObjectName may contain wildcards, meaning it represents a set of individual MBeans,
 + * JMX resources don't fit well with the hierarchical approach modelled by other IResource
 + * implementations and utilised by ClientState::ensureHasPermission etc. To enable grants to use
 + * pattern-type ObjectNames, this class performs its own custom matching and filtering of resources
 + * rather than pushing that down to the configured IAuthorizer. To that end, during authorization
 + * it pulls back all permissions for the active subject, filtering them to retain only grants on
 + * JMXResources. It then uses ObjectName::apply to assert whether the target MBeans are wholly
 + * represented by the resources with permissions. This means that it cannot use the PermissionsCache
 + * as IAuthorizer can, so it manages its own cache locally.
 + *
 + * Methods are split into 2 categories; those which are to be invoked on the MBeanServer itself
 + * and those which apply to MBean instances. Actually, this is somewhat of a construct as in fact
 + * *all* invocations are performed on the MBeanServer instance, the distinction is made here on
 + * those methods which take an ObjectName as their first argument and those which do not.
 + * Invoking a method of the former type, e.g. MBeanServer::getAttribute(ObjectName name, String attribute),
 + * implies that the caller is concerned with a specific MBean. Conversely, invoking a method such as
 + * MBeanServer::getDomains is primarily a function of the MBeanServer itself. This class makes
 + * such a distinction in order to identify which JMXResource the subject requires permissions on.
 + *
-  * Certain operations are never allowed for users and these are recorded in a blacklist so that we
++ * Certain operations are never allowed for users and these are recorded in a deny list so that we
 + * can short circuit authorization process if one is attempted by a remote subject.
 + *
 + */
 +public class AuthorizationProxy implements InvocationHandler
 +{
 +    private static final Logger logger = LoggerFactory.getLogger(AuthorizationProxy.class);
 +
 +    /*
-      A whitelist of permitted methods on the MBeanServer interface which *do not* take an ObjectName
++     A list of permitted methods on the MBeanServer interface which *do not* take an ObjectName
 +     as their first argument. These methods can be thought of as relating to the MBeanServer itself,
-      rather than to the MBeans it manages. All of the whitelisted methods are essentially descriptive,
++     rather than to the MBeans it manages. All of the allowed methods are essentially descriptive,
 +     hence they require the Subject to have the DESCRIBE permission on the root JMX resource.
 +     */
-     private static final Set<String> MBEAN_SERVER_METHOD_WHITELIST = ImmutableSet.of("getDefaultDomain",
-                                                                                      "getDomains",
-                                                                                      "getMBeanCount",
-                                                                                      "hashCode",
-                                                                                      "queryMBeans",
-                                                                                      "queryNames",
-                                                                                      "toString");
++    private static final Set<String> MBEAN_SERVER_ALLOWED_METHODS = ImmutableSet.of("getDefaultDomain",
++                                                                                    "getDomains",
++                                                                                    "getMBeanCount",
++                                                                                    "hashCode",
++                                                                                    "queryMBeans",
++                                                                                    "queryNames",
++                                                                                    "toString");
 +
 +    /*
-      A blacklist of method names which are never permitted to be executed by a remote user,
++     A list of method names which are never permitted to be executed by a remote user,
 +     regardless of privileges they may be granted.
 +     */
-     private static final Set<String> METHOD_BLACKLIST = ImmutableSet.of("createMBean",
-                                                                         "deserialize",
-                                                                         "getClassLoader",
-                                                                         "getClassLoaderFor",
-                                                                         "instantiate",
-                                                                         "registerMBean",
-                                                                         "unregisterMBean");
++    private static final Set<String> DENIED_METHODS = ImmutableSet.of("createMBean",
++                                                                      "deserialize",
++                                                                      "getClassLoader",
++                                                                      "getClassLoaderFor",
++                                                                      "instantiate",
++                                                                      "registerMBean",
++                                                                      "unregisterMBean");
 +
 +    private static final JMXPermissionsCache permissionsCache = new JMXPermissionsCache();
 +    private MBeanServer mbs;
 +
 +    /*
 +     Used to check whether the Role associated with the authenticated Subject has superuser
 +     status. By default, just delegates to Roles::hasSuperuserStatus, but can be overridden for testing.
 +     */
 +    protected Function<RoleResource, Boolean> isSuperuser = Roles::hasSuperuserStatus;
 +
 +    /*
 +     Used to retrieve the set of all permissions granted to a given role. By default, this fetches
 +     the permissions from the local cache, which in turn loads them from the configured IAuthorizer
 +     but can be overridden for testing.
 +     */
 +    protected Function<RoleResource, Set<PermissionDetails>> getPermissions = permissionsCache::get;
 +
 +    /*
 +     Used to decide whether authorization is enabled or not, usually this depends on the configured
 +     IAuthorizer, but can be overridden for testing.
 +     */
 +    protected Supplier<Boolean> isAuthzRequired = () -> DatabaseDescriptor.getAuthorizer().requireAuthorization();
 +
 +    /*
 +     Used to find matching MBeans when the invocation target is a pattern type ObjectName.
 +     Defaults to querying the MBeanServer but can be overridden for testing. See checkPattern for usage.
 +     */
 +    protected Function<ObjectName, Set<ObjectName>> queryNames = (name) -> mbs.queryNames(name, null);
 +
 +    /*
 +     Used to determine whether auth setup has completed so we know whether the expect the IAuthorizer
 +     to be ready. Can be overridden for testing.
 +     */
 +    protected Supplier<Boolean> isAuthSetupComplete = () -> StorageService.instance.isAuthSetupComplete();
 +
 +    @Override
 +    public Object invoke(Object proxy, Method method, Object[] args)
 +            throws Throwable
 +    {
 +        String methodName = method.getName();
 +
 +        if ("getMBeanServer".equals(methodName))
 +            throw new SecurityException("Access denied");
 +
 +        // Retrieve Subject from current AccessControlContext
 +        AccessControlContext acc = AccessController.getContext();
 +        Subject subject = Subject.getSubject(acc);
 +
 +        // Allow setMBeanServer iff performed on behalf of the connector server itself
 +        if (("setMBeanServer").equals(methodName))
 +        {
 +            if (subject != null)
 +                throw new SecurityException("Access denied");
 +
 +            if (args[0] == null)
 +                throw new IllegalArgumentException("Null MBeanServer");
 +
 +            if (mbs != null)
 +                throw new IllegalArgumentException("MBeanServer already initialized");
 +
 +            mbs = (MBeanServer) args[0];
 +            return null;
 +        }
 +
 +        if (authorize(subject, methodName, args))
 +            return invoke(method, args);
 +
 +        throw new SecurityException("Access Denied");
 +    }
 +
 +    /**
 +     * Performs the actual authorization of an identified subject to execute a remote method invocation.
 +     * @param subject The principal making the execution request. A null value represents a local invocation
 +     *                from the JMX connector itself
 +     * @param methodName Name of the method being invoked
 +     * @param args Array containing invocation argument. If the first element is an ObjectName instance, for
 +     *             authz purposes we consider this an invocation of an MBean method, otherwise it is treated
 +     *             as an invocation of a method on the MBeanServer.
 +     */
 +    @VisibleForTesting
 +    boolean authorize(Subject subject, String methodName, Object[] args)
 +    {
 +        logger.trace("Authorizing JMX method invocation {} for {}",
 +                     methodName,
 +                     subject == null ? "" :subject.toString().replaceAll("\\n", " "));
 +
 +        if (!isAuthSetupComplete.get())
 +        {
 +            logger.trace("Auth setup is not complete, refusing access");
 +            return false;
 +        }
 +
 +        // Permissive authorization is enabled
 +        if (!isAuthzRequired.get())
 +            return true;
 +
 +        // Allow operations performed locally on behalf of the connector server itself
 +        if (subject == null)
 +            return true;
 +
 +        // Restrict access to certain methods by any remote user
-         if (METHOD_BLACKLIST.contains(methodName))
++        if (DENIED_METHODS.contains(methodName))
 +        {
-             logger.trace("Access denied to blacklisted method {}", methodName);
++            logger.trace("Access denied to restricted method {}", methodName);
 +            return false;
 +        }
 +
 +        // Reject if the user has not authenticated
 +        Set<Principal> principals = subject.getPrincipals();
 +        if (principals == null || principals.isEmpty())
 +            return false;
 +
 +        // Currently, we assume that the first Principal returned from the Subject
 +        // is the one to use for authorization. It would be good to make this more
 +        // robust, but we have no control over which Principals a given LoginModule
 +        // might choose to associate with the Subject following successful authentication
 +        RoleResource userResource = RoleResource.role(principals.iterator().next().getName());
 +        // A role with superuser status can do anything
 +        if (isSuperuser.apply(userResource))
 +            return true;
 +
 +        // The method being invoked may be a method on an MBean, or it could belong
 +        // to the MBeanServer itself
 +        if (args != null && args[0] instanceof ObjectName)
 +            return authorizeMBeanMethod(userResource, methodName, args);
 +        else
 +            return authorizeMBeanServerMethod(userResource, methodName);
 +    }
 +
 +    /**
 +     * Authorize execution of a method on the MBeanServer which does not take an MBean ObjectName
-      * as its first argument. The whitelisted methods that match this criteria are generally
++     * as its first argument. The allowed methods that match this criteria are generally
 +     * descriptive methods concerned with the MBeanServer itself, rather than with any particular
 +     * set of MBeans managed by the server and so we check the DESCRIBE permission on the root
 +     * JMXResource (representing the MBeanServer)
 +     *
 +     * @param subject
 +     * @param methodName
 +     * @return the result of the method invocation, if authorized
 +     * @throws Throwable
 +     * @throws SecurityException if authorization fails
 +     */
 +    private boolean authorizeMBeanServerMethod(RoleResource subject, String methodName)
 +    {
 +        logger.trace("JMX invocation of {} on MBeanServer requires permission {}", methodName, Permission.DESCRIBE);
-         return (MBEAN_SERVER_METHOD_WHITELIST.contains(methodName) &&
-             hasPermission(subject, Permission.DESCRIBE, JMXResource.root()));
++        return (MBEAN_SERVER_ALLOWED_METHODS.contains(methodName) &&
++                hasPermission(subject, Permission.DESCRIBE, JMXResource.root()));
 +    }
 +
 +    /**
 +     * Authorize execution of a method on an MBean (or set of MBeans) which may be
 +     * managed by the MBeanServer. Note that this also includes the queryMBeans and queryNames
 +     * methods of MBeanServer as those both take an ObjectName (possibly a pattern containing
 +     * wildcards) as their first argument. They both of those methods also accept null arguments,
 +     * in which case they will be handled by authorizedMBeanServerMethod
 +     *
 +     * @param role
 +     * @param methodName
 +     * @param args
 +     * @return the result of the method invocation, if authorized
 +     * @throws Throwable
 +     * @throws SecurityException if authorization fails
 +     */
 +    private boolean authorizeMBeanMethod(RoleResource role, String methodName, Object[] args)
 +    {
 +        ObjectName targetBean = (ObjectName)args[0];
 +
 +        // work out which permission we need to execute the method being called on the mbean
 +        Permission requiredPermission = getRequiredPermission(methodName);
 +        if (null == requiredPermission)
 +            return false;
 +
 +        logger.trace("JMX invocation of {} on {} requires permission {}", methodName, targetBean, requiredPermission);
 +
 +        // find any JMXResources upon which the authenticated subject has been granted the
 +        // reqired permission. We'll do ObjectName-specific filtering & matching of resources later
 +        Set<JMXResource> permittedResources = getPermittedResources(role, requiredPermission);
 +
 +        if (permittedResources.isEmpty())
 +            return false;
 +
 +        // finally, check the JMXResource from the grants to see if we have either
 +        // an exact match or a wildcard match for the target resource, whichever is
 +        // applicable
 +        return targetBean.isPattern()
 +                ? checkPattern(targetBean, permittedResources)
 +                : checkExact(targetBean, permittedResources);
 +    }
 +
 +    /**
 +     * Get any grants of the required permission for the authenticated subject, regardless
 +     * of the resource the permission applies to as we'll do the filtering & matching in
 +     * the calling method
 +     * @param subject
 +     * @param required
 +     * @return the set of JMXResources upon which the subject has been granted the required permission
 +     */
 +    private Set<JMXResource> getPermittedResources(RoleResource subject, Permission required)
 +    {
 +        return getPermissions.apply(subject)
 +               .stream()
 +               .filter(details -> details.permission == required)
 +               .map(details -> (JMXResource)details.resource)
 +               .collect(Collectors.toSet());
 +    }
 +
 +    /**
 +     * Check whether a required permission has been granted to the authenticated subject on a specific resource
 +     * @param subject
 +     * @param permission
 +     * @param resource
 +     * @return true if the Subject has been granted the required permission on the specified resource; false otherwise
 +     */
 +    private boolean hasPermission(RoleResource subject, Permission permission, JMXResource resource)
 +    {
 +        return getPermissions.apply(subject)
 +               .stream()
 +               .anyMatch(details -> details.permission == permission && details.resource.equals(resource));
 +    }
 +
 +    /**
 +     * Given a set of JMXResources upon which the Subject has been granted a particular permission,
 +     * check whether any match the pattern-type ObjectName representing the target of the method
 +     * invocation. At this point, we are sure that whatever the required permission, the Subject
 +     * has definitely been granted it against this set of JMXResources. The job of this method is
 +     * only to verify that the target of the invocation is covered by the members of the set.
 +     *
 +     * @param target
 +     * @param permittedResources
 +     * @return true if all registered beans which match the target can also be matched by the
 +     *         JMXResources the subject has been granted permissions on; false otherwise
 +     */
 +    private boolean checkPattern(ObjectName target, Set<JMXResource> permittedResources)
 +    {
 +        // if the required permission was granted on the root JMX resource, then we're done
 +        if (permittedResources.contains(JMXResource.root()))
 +            return true;
 +
 +        // Get the full set of beans which match the target pattern
 +        Set<ObjectName> targetNames = queryNames.apply(target);
 +
 +        // Iterate over the resources the permission has been granted on. Some of these may
 +        // be patterns, so query the server to retrieve the full list of matching names and
 +        // remove those from the target set. Once the target set is empty (i.e. all required
 +        // matches have been satisfied), the requirement is met.
 +        // If there are still unsatisfied targets after all the JMXResources have been processed,
 +        // there are insufficient grants to permit the operation.
 +        for (JMXResource resource : permittedResources)
 +        {
 +            try
 +            {
 +                Set<ObjectName> matchingNames = queryNames.apply(ObjectName.getInstance(resource.getObjectName()));
 +                targetNames.removeAll(matchingNames);
 +                if (targetNames.isEmpty())
 +                    return true;
 +            }
 +            catch (MalformedObjectNameException e)
 +            {
 +                logger.warn("Permissions for JMX resource contains invalid ObjectName {}", resource.getObjectName());
 +            }
 +        }
 +
 +        logger.trace("Subject does not have sufficient permissions on all MBeans matching the target pattern {}", target);
 +        return false;
 +    }
 +
 +    /**
 +     * Given a set of JMXResources upon which the Subject has been granted a particular permission,
 +     * check whether any match the ObjectName representing the target of the method invocation.
 +     * At this point, we are sure that whatever the required permission, the Subject has definitely
 +     * been granted it against this set of JMXResources. The job of this method is only to verify
 +     * that the target of the invocation is matched by a member of the set.
 +     *
 +     * @param target
 +     * @param permittedResources
 +     * @return true if at least one of the permitted resources matches the target; false otherwise
 +     */
 +    private boolean checkExact(ObjectName target, Set<JMXResource> permittedResources)
 +    {
 +        // if the required permission was granted on the root JMX resource, then we're done
 +        if (permittedResources.contains(JMXResource.root()))
 +            return true;
 +
 +        for (JMXResource resource : permittedResources)
 +        {
 +            try
 +            {
 +                if (ObjectName.getInstance(resource.getObjectName()).apply(target))
 +                    return true;
 +            }
 +            catch (MalformedObjectNameException e)
 +            {
 +                logger.warn("Permissions for JMX resource contains invalid ObjectName {}", resource.getObjectName());
 +            }
 +        }
 +
 +        logger.trace("Subject does not have sufficient permissions on target MBean {}", target);
 +        return false;
 +    }
 +
 +    /**
 +     * Mapping between method names and the permission required to invoke them. Note, these
 +     * names refer to methods on MBean instances invoked via the MBeanServer.
 +     * @param methodName
 +     * @return
 +     */
 +    private static Permission getRequiredPermission(String methodName)
 +    {
 +        switch (methodName)
 +        {
 +            case "getAttribute":
 +            case "getAttributes":
 +                return Permission.SELECT;
 +            case "setAttribute":
 +            case "setAttributes":
 +                return Permission.MODIFY;
 +            case "invoke":
 +                return Permission.EXECUTE;
 +            case "getInstanceOf":
 +            case "getMBeanInfo":
 +            case "hashCode":
 +            case "isInstanceOf":
 +            case "isRegistered":
 +            case "queryMBeans":
 +            case "queryNames":
 +                return Permission.DESCRIBE;
 +            default:
 +                logger.debug("Access denied, method name {} does not map to any defined permission", methodName);
 +                return null;
 +        }
 +    }
 +
 +    /**
 +     * Invoke a method on the MBeanServer instance. This is called when authorization is not required (because
 +     * AllowAllAuthorizer is configured, or because the invocation is being performed by the JMXConnector
 +     * itself rather than by a connected client), and also when a call from an authenticated subject
 +     * has been successfully authorized
 +     *
 +     * @param method
 +     * @param args
 +     * @return
 +     * @throws Throwable
 +     */
 +    private Object invoke(Method method, Object[] args) throws Throwable
 +    {
 +        try
 +        {
 +            return method.invoke(mbs, args);
 +        }
 +        catch (InvocationTargetException e) //Catch any exception that might have been thrown by the mbeans
 +        {
 +            Throwable t = e.getCause(); //Throw the exception that nodetool etc expects
 +            throw t;
 +        }
 +    }
 +
 +    /**
 +     * Query the configured IAuthorizer for the set of all permissions granted on JMXResources to a specific subject
 +     * @param subject
 +     * @return All permissions granted to the specfied subject (including those transitively inherited from
 +     *         any roles the subject has been granted), filtered to include only permissions granted on
 +     *         JMXResources
 +     */
 +    private static Set<PermissionDetails> loadPermissions(RoleResource subject)
 +    {
 +        // get all permissions for the specified subject. We'll cache them as it's likely
 +        // we'll receive multiple lookups for the same subject (but for different resources
 +        // and permissions) in quick succession
 +        return DatabaseDescriptor.getAuthorizer().list(AuthenticatedUser.SYSTEM_USER, Permission.ALL, null, subject)
 +                                                 .stream()
 +                                                 .filter(details -> details.resource instanceof JMXResource)
 +                                                 .collect(Collectors.toSet());
 +    }
 +
 +    private static final class JMXPermissionsCache extends AuthCache<RoleResource, Set<PermissionDetails>>
 +    {
 +        protected JMXPermissionsCache()
 +        {
 +            super("JMXPermissionsCache",
 +                  DatabaseDescriptor::setPermissionsValidity,
 +                  DatabaseDescriptor::getPermissionsValidity,
 +                  DatabaseDescriptor::setPermissionsUpdateInterval,
 +                  DatabaseDescriptor::getPermissionsUpdateInterval,
 +                  DatabaseDescriptor::setPermissionsCacheMaxEntries,
 +                  DatabaseDescriptor::getPermissionsCacheMaxEntries,
 +                  AuthorizationProxy::loadPermissions,
 +                  () -> true);
 +        }
 +    }
 +}
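The deny list above is consulted before any permission lookup, so a remote subject can never reach a restricted method regardless of its grants. Below is a minimal sketch of that short circuit using the overridable test hooks the class exposes; the sketch class name, the hook values and the ObjectName string are illustrative assumptions, not part of this commit:

    package org.apache.cassandra.auth.jmx;

    import javax.management.ObjectName;
    import javax.security.auth.Subject;

    public class DenyListSketch
    {
        public static void main(String[] unused) throws Exception
        {
            AuthorizationProxy proxy = new AuthorizationProxy()
            {{
                isAuthSetupComplete = () -> true; // pretend auth setup has finished
                isAuthzRequired = () -> true;     // authorization is enforced
                isSuperuser = role -> true;       // even a superuser...
            }};
            Object[] invocationArgs = { new ObjectName("org.apache.cassandra.db:type=DisallowedDirectories") };
            // ...is rejected for a deny-listed method; the check fires before any permission lookup.
            boolean allowed = proxy.authorize(new Subject(), "unregisterMBean", invocationArgs);
            System.out.println("unregisterMBean allowed? " + allowed); // expected: false
        }
    }
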
diff --cc src/java/org/apache/cassandra/cql3/functions/UDFunction.java
index f8af619,27f9eb8..6928a06
--- a/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
+++ b/src/java/org/apache/cassandra/cql3/functions/UDFunction.java
@@@ -77,13 -75,11 +77,13 @@@ public abstract class UDFunction extend
      protected final TypeCodec<Object> returnCodec;
      protected final boolean calledOnNullInput;
  
 +    protected final UDFContext udfContext;
 +
      //
-     // Access to classes is controlled via a whitelist and a blacklist.
+     // Access to classes is controlled via allow and disallow lists.
      //
      // When a class is requested (both during compilation and runtime),
-     // the whitelistedPatterns array is searched first, whether the
+     // the allowedPatterns array is searched first, whether the
      // requested name matches one of the patterns. If not, nothing is
      // returned from the class-loader - meaning ClassNotFoundException
      // during runtime and "type could not resolved" during compilation.
@@@ -112,12 -108,10 +112,12 @@@
      "java/time/",
      "java/util/",
      "org/apache/cassandra/cql3/functions/JavaUDF.class",
 +    "org/apache/cassandra/cql3/functions/UDFContext.class",
      "org/apache/cassandra/exceptions/",
 +    "org/apache/cassandra/transport/ProtocolVersion.class"
      };
-     // Only need to blacklist a pattern, if it would otherwise be allowed via whitelistedPatterns
-     private static final String[] blacklistedPatterns =
+     // Only need to disallow a pattern, if it would otherwise be allowed via allowedPatterns
+     private static final String[] disallowedPatterns =
      {
      "com/datastax/driver/core/Cluster.class",
      "com/datastax/driver/core/Metrics.class",
diff --cc src/java/org/apache/cassandra/db/Directories.java
index 4731b3e,af3f63c..0ffa6be
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@@ -326,10 -310,10 +326,10 @@@ public class Directorie
  
      /**
       * Basically the same as calling {@link #getWriteableLocationAsFile(long)} with an unknown size ({@code -1L}),
-      * which may return any non-blacklisted directory - even a data directory that has no usable space.
+      * which may return any allowed directory - even a data directory that has no usable space.
       * Do not use this method in production code.
       *
-      * @throws FSWriteError if all directories are blacklisted.
 -     * @throws IOError if all directories are blocked.
++     * @throws FSWriteError if all directories are disallowed.
       */
      public File getDirectoryForNewSSTables()
      {
@@@ -337,9 -321,9 +337,9 @@@
      }
  
      /**
-      * Returns a non-blacklisted data directory that _currently_ has {@code writeSize} bytes as usable space.
+      * Returns an allowed directory that _currently_ has {@code writeSize} bytes as usable space.
       *
-      * @throws FSWriteError if all directories are blacklisted.
 -     * @throws IOError if all directories are disallowed.
++     * @throws FSWriteError if all directories are disallowed.
       */
      public File getWriteableLocationAsFile(long writeSize)
      {
@@@ -378,10 -359,9 +378,9 @@@
      }
  
      /**
-      * Returns a non-blacklisted data directory that _currently_ has {@code writeSize} bytes as usable space, null if
-      * there is not enough space left in all directories.
+      * Returns an allowed data directory that _currently_ has {@code writeSize} bytes as usable space.
       *
-      * @throws FSWriteError if all directories are blacklisted.
 -     * @throws IOError if all directories are disallowed.
++     * @throws FSWriteError if all directories are disallowed.
       */
      public DataDirectory getWriteableLocation(long writeSize)
      {
@@@ -470,26 -450,6 +469,26 @@@
          return totalAvailable > expectedTotalWriteSize;
      }
  
 +    public DataDirectory[] getWriteableLocations()
 +    {
-         List<DataDirectory> nonBlacklistedDirs = new ArrayList<>();
++        List<DataDirectory> allowedDirs = new ArrayList<>();
 +        for (DataDirectory dir : paths)
 +        {
-             if (!BlacklistedDirectories.isUnwritable(dir.location))
-                 nonBlacklistedDirs.add(dir);
++            if (!DisallowedDirectories.isUnwritable(dir.location))
++                allowedDirs.add(dir);
 +        }
 +
-         Collections.sort(nonBlacklistedDirs, new Comparator<DataDirectory>()
++        Collections.sort(allowedDirs, new Comparator<DataDirectory>()
 +        {
 +            @Override
 +            public int compare(DataDirectory o1, DataDirectory o2)
 +            {
 +                return o1.location.compareTo(o2.location);
 +            }
 +        });
-         return nonBlacklistedDirs.toArray(new DataDirectory[nonBlacklistedDirs.size()]);
++        return allowedDirs.toArray(new DataDirectory[allowedDirs.size()]);
 +    }
 +
      public static File getSnapshotDirectory(Descriptor desc, String snapshotName)
      {
          return getSnapshotDirectory(desc.directory, snapshotName);
diff --cc src/java/org/apache/cassandra/db/DisallowedDirectories.java
index cff9a78,75b5e79..f030253
--- a/src/java/org/apache/cassandra/db/DisallowedDirectories.java
+++ b/src/java/org/apache/cassandra/db/DisallowedDirectories.java
@@@ -40,11 -39,10 +40,12 @@@ public class DisallowedDirectories impl
      private final Set<File> unreadableDirectories = new CopyOnWriteArraySet<File>();
      private final Set<File> unwritableDirectories = new CopyOnWriteArraySet<File>();
  
 +    private static final AtomicInteger directoriesVersion = new AtomicInteger();
 +
-     private BlacklistedDirectories()
+     private DisallowedDirectories()
      {
          // Register this instance with JMX
+         MBeanWrapper.instance.registerMBean(this, DEPRECATED_MBEAN_NAME, MBeanWrapper.OnException.LOG);
          MBeanWrapper.instance.registerMBean(this, MBEAN_NAME, MBeanWrapper.OnException.LOG);
      }
  
@@@ -79,8 -67,7 +80,8 @@@
          File directory = getDirectory(path);
          if (instance.unreadableDirectories.add(directory))
          {
 +            directoriesVersion.incrementAndGet();
-             logger.warn("Blacklisting {} for reads", directory);
+             logger.warn("Disallowing {} for reads", directory);
              return directory;
          }
          return null;
@@@ -97,8 -84,7 +98,8 @@@
          File directory = getDirectory(path);
          if (instance.unwritableDirectories.add(directory))
          {
 +            directoriesVersion.incrementAndGet();
-             logger.warn("Blacklisting {} for writes", directory);
+             logger.warn("Disallowing {} for writes", directory);
              return directory;
          }
          return null;
diff --cc src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java
index 3fb9f39,8e825dd..64f15e5
--- a/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java
+++ b/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java
@@@ -20,8 -20,9 +20,8 @@@ package org.apache.cassandra.db
  import java.io.File;
  import java.util.Set;
  
- public interface BlacklistedDirectoriesMBean
+ public interface DisallowedDirectoriesMBean
  {
 -
      public Set<File> getUnreadableDirectories();
      
      public Set<File> getUnwritableDirectories();
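The MBean interface keeps the same attributes across the rename, and the DisallowedDirectories constructor shown earlier now registers the bean under the deprecated name as well as the new one. Below is a minimal client-side lookup sketch; the probe class and both ObjectName strings are assumptions derived from the old and new class names, not values quoted in this email:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;

    public final class DisallowedDirectoriesProbe
    {
        public static Object unwritableDirectories(MBeanServerConnection mbsc) throws Exception
        {
            ObjectName current = new ObjectName("org.apache.cassandra.db:type=DisallowedDirectories");
            ObjectName deprecated = new ObjectName("org.apache.cassandra.db:type=BlacklistedDirectories");
            ObjectName target = mbsc.isRegistered(current) ? current : deprecated;
            // "UnwritableDirectories" maps to getUnwritableDirectories() via standard JMX attribute naming.
            return mbsc.getAttribute(target, "UnwritableDirectories");
        }
    }

Existing automation that still queries the old name should keep working against the deprecated registration and can migrate to the new ObjectName separately.
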
diff --cc src/java/org/apache/cassandra/db/DiskBoundaries.java
index 90af893,0000000..cb046eb
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/db/DiskBoundaries.java
+++ b/src/java/org/apache/cassandra/db/DiskBoundaries.java
@@@ -1,134 -1,0 +1,134 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.db;
 +
 +import java.util.Collections;
 +import java.util.List;
 +
 +import com.google.common.annotations.VisibleForTesting;
 +import com.google.common.collect.ImmutableList;
 +
 +import org.apache.cassandra.io.sstable.format.SSTableReader;
 +import org.apache.cassandra.service.StorageService;
 +
 +public class DiskBoundaries
 +{
 +    public final List<Directories.DataDirectory> directories;
 +    public final ImmutableList<PartitionPosition> positions;
 +    final long ringVersion;
 +    final int directoriesVersion;
 +    private final ColumnFamilyStore cfs;
 +    private volatile boolean isInvalid = false;
 +
 +    public DiskBoundaries(ColumnFamilyStore cfs, Directories.DataDirectory[] directories, int diskVersion)
 +    {
 +        this(cfs, directories, null, -1, diskVersion);
 +    }
 +
 +    @VisibleForTesting
 +    public DiskBoundaries(ColumnFamilyStore cfs, Directories.DataDirectory[] directories, List<PartitionPosition> positions, long ringVersion, int diskVersion)
 +    {
 +        this.directories = directories == null ? null : ImmutableList.copyOf(directories);
 +        this.positions = positions == null ? null : ImmutableList.copyOf(positions);
 +        this.ringVersion = ringVersion;
 +        this.directoriesVersion = diskVersion;
 +        this.cfs = cfs;
 +    }
 +
 +    public boolean equals(Object o)
 +    {
 +        if (this == o) return true;
 +        if (o == null || getClass() != o.getClass()) return false;
 +
 +        DiskBoundaries that = (DiskBoundaries) o;
 +
 +        if (ringVersion != that.ringVersion) return false;
 +        if (directoriesVersion != that.directoriesVersion) return false;
 +        if (!directories.equals(that.directories)) return false;
 +        return positions != null ? positions.equals(that.positions) : that.positions == null;
 +    }
 +
 +    public int hashCode()
 +    {
 +        int result = directories != null ? directories.hashCode() : 0;
 +        result = 31 * result + (positions != null ? positions.hashCode() : 0);
 +        result = 31 * result + (int) (ringVersion ^ (ringVersion >>> 32));
 +        result = 31 * result + directoriesVersion;
 +        return result;
 +    }
 +
 +    public String toString()
 +    {
 +        return "DiskBoundaries{" +
 +               "directories=" + directories +
 +               ", positions=" + positions +
 +               ", ringVersion=" + ringVersion +
 +               ", directoriesVersion=" + directoriesVersion +
 +               '}';
 +    }
 +
 +    /**
 +     * check if the given disk boundaries are out of date due not being set or to having too old diskVersion/ringVersion
 +     */
 +    public boolean isOutOfDate()
 +    {
 +        if (isInvalid)
 +            return true;
-         int currentDiskVersion = BlacklistedDirectories.getDirectoriesVersion();
++        int currentDiskVersion = DisallowedDirectories.getDirectoriesVersion();
 +        long currentRingVersion = StorageService.instance.getTokenMetadata().getRingVersion();
 +        return currentDiskVersion != directoriesVersion || (ringVersion != -1 && currentRingVersion != ringVersion);
 +    }
 +
 +    public void invalidate()
 +    {
 +        this.isInvalid = true;
 +    }
 +
 +    public int getDiskIndex(SSTableReader sstable)
 +    {
 +        if (positions == null)
 +        {
 +            return getBoundariesFromSSTableDirectory(sstable);
 +        }
 +
 +        int pos = Collections.binarySearch(positions, sstable.first);
 +        assert pos < 0; // boundaries are .minkeybound and .maxkeybound so they should never be equal
 +        return -pos - 1;
 +    }
 +
 +    /**
 +     * Try to figure out location based on sstable directory
 +     */
 +    private int getBoundariesFromSSTableDirectory(SSTableReader sstable)
 +    {
 +        Directories.DataDirectory actualDirectory = cfs.getDirectories().getDataDirectoryForFile(sstable.descriptor);
 +        for (int i = 0; i < directories.size(); i++)
 +        {
 +            Directories.DataDirectory directory = directories.get(i);
 +            if (actualDirectory != null && actualDirectory.equals(directory))
 +                return i;
 +        }
 +        return 0;
 +    }
 +
 +    public Directories.DataDirectory getCorrectDiskForSSTable(SSTableReader sstable)
 +    {
 +        return directories.get(getDiskIndex(sstable));
 +    }
 +}
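getDiskIndex above relies on Collections.binarySearch never finding an exact match, because the stored positions are min/max key bounds rather than real keys; the negative insertion point then encodes which disk an sstable belongs to. A tiny worked example follows, with plain integers standing in for PartitionPosition (the class name and values are illustrative only):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class DiskIndexSketch
    {
        public static void main(String[] args)
        {
            // Upper bounds per disk; the last entry plays the role of the partitioner's maximum key bound.
            List<Integer> positions = Arrays.asList(100, 200, 300);
            int firstKey = 150; // stands in for sstable.first, falling between the first two boundaries
            int pos = Collections.binarySearch(positions, firstKey); // no exact match, so pos = -(insertionPoint) - 1 = -2
            System.out.println(-pos - 1); // prints 1: the sstable maps to the second disk
        }
    }
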
diff --cc src/java/org/apache/cassandra/db/DiskBoundaryManager.java
index 61febe9,0000000..51343ad
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/db/DiskBoundaryManager.java
+++ b/src/java/org/apache/cassandra/db/DiskBoundaryManager.java
@@@ -1,140 -1,0 +1,140 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.db;
 +
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.List;
 +
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.dht.IPartitioner;
 +import org.apache.cassandra.dht.Range;
 +import org.apache.cassandra.dht.Splitter;
 +import org.apache.cassandra.dht.Token;
 +import org.apache.cassandra.locator.TokenMetadata;
 +import org.apache.cassandra.service.PendingRangeCalculatorService;
 +import org.apache.cassandra.service.StorageService;
 +import org.apache.cassandra.utils.FBUtilities;
 +
 +public class DiskBoundaryManager
 +{
 +    private static final Logger logger = LoggerFactory.getLogger(DiskBoundaryManager.class);
 +    private volatile DiskBoundaries diskBoundaries;
 +
 +    public DiskBoundaries getDiskBoundaries(ColumnFamilyStore cfs)
 +    {
 +        if (!cfs.getPartitioner().splitter().isPresent())
-             return new DiskBoundaries(cfs, cfs.getDirectories().getWriteableLocations(), BlacklistedDirectories.getDirectoriesVersion());
++            return new DiskBoundaries(cfs, cfs.getDirectories().getWriteableLocations(), DisallowedDirectories.getDirectoriesVersion());
 +        if (diskBoundaries == null || diskBoundaries.isOutOfDate())
 +        {
 +            synchronized (this)
 +            {
 +                if (diskBoundaries == null || diskBoundaries.isOutOfDate())
 +                {
 +                    logger.debug("Refreshing disk boundary cache for {}.{}", cfs.keyspace.getName(), cfs.getTableName());
 +                    DiskBoundaries oldBoundaries = diskBoundaries;
 +                    diskBoundaries = getDiskBoundaryValue(cfs);
 +                    logger.debug("Updating boundaries from {} to {} for {}.{}", oldBoundaries, diskBoundaries, cfs.keyspace.getName(), cfs.getTableName());
 +                }
 +            }
 +        }
 +        return diskBoundaries;
 +    }
 +
 +    public void invalidate()
 +    {
 +       if (diskBoundaries != null)
 +           diskBoundaries.invalidate();
 +    }
 +
 +    private static DiskBoundaries getDiskBoundaryValue(ColumnFamilyStore cfs)
 +    {
 +        Collection<Range<Token>> localRanges;
 +
 +        long ringVersion;
 +        TokenMetadata tmd;
 +        do
 +        {
 +            tmd = StorageService.instance.getTokenMetadata();
 +            ringVersion = tmd.getRingVersion();
 +            if (StorageService.instance.isBootstrapMode()
 +                && !StorageService.isReplacingSameAddress()) // When replacing same address, the node marks itself as UN locally
 +            {
 +                PendingRangeCalculatorService.instance.blockUntilFinished();
 +                localRanges = tmd.getPendingRanges(cfs.keyspace.getName(), FBUtilities.getBroadcastAddress());
 +            }
 +            else
 +            {
 +                // Reason we use use the future settled TMD is that if we decommission a node, we want to stream
 +                // from that node to the correct location on disk, if we didn't, we would put new files in the wrong places.
 +                // We do this to minimize the amount of data we need to move in rebalancedisks once everything settled
 +                localRanges = cfs.keyspace.getReplicationStrategy().getAddressRanges(tmd.cloneAfterAllSettled()).get(FBUtilities.getBroadcastAddress());
 +            }
 +            logger.debug("Got local ranges {} (ringVersion = {})", localRanges, ringVersion);
 +        }
 +        while (ringVersion != tmd.getRingVersion()); // if ringVersion is different here it means that
 +                                                     // it might have changed before we calculated localRanges - recalculate
 +
 +        int directoriesVersion;
 +        Directories.DataDirectory[] dirs;
 +        do
 +        {
-             directoriesVersion = BlacklistedDirectories.getDirectoriesVersion();
++            directoriesVersion = DisallowedDirectories.getDirectoriesVersion();
 +            dirs = cfs.getDirectories().getWriteableLocations();
 +        }
-         while (directoriesVersion != BlacklistedDirectories.getDirectoriesVersion()); // if directoriesVersion has changed we need to recalculate
++        while (directoriesVersion != DisallowedDirectories.getDirectoriesVersion()); // if directoriesVersion has changed we need to recalculate
 +
 +        if (localRanges == null || localRanges.isEmpty())
 +            return new DiskBoundaries(cfs, dirs, null, ringVersion, directoriesVersion);
 +
 +        List<Range<Token>> sortedLocalRanges = Range.sort(localRanges);
 +
 +        List<PartitionPosition> positions = getDiskBoundaries(sortedLocalRanges, cfs.getPartitioner(), dirs);
 +        return new DiskBoundaries(cfs, dirs, positions, ringVersion, directoriesVersion);
 +    }
 +
 +    /**
 +     * Returns a list of disk boundaries, the result will differ depending on whether vnodes are enabled or not.
 +     *
 +     * What is returned are upper bounds for the disks, meaning everything from partitioner.minToken up to
 +     * getDiskBoundaries(..).get(0) should be on the first disk, everything between 0 to 1 should be on the second disk
 +     * etc.
 +     *
 +     * The final entry in the returned list will always be the partitioner maximum tokens upper key bound
 +     */
 +    private static List<PartitionPosition> getDiskBoundaries(List<Range<Token>> sortedLocalRanges, IPartitioner partitioner, Directories.DataDirectory[] dataDirectories)
 +    {
 +        assert partitioner.splitter().isPresent();
 +        Splitter splitter = partitioner.splitter().get();
 +        boolean dontSplitRanges = DatabaseDescriptor.getNumTokens() > 1;
 +        List<Token> boundaries = splitter.splitOwnedRanges(dataDirectories.length, sortedLocalRanges, dontSplitRanges);
 +        // If we can't split by ranges, split evenly to ensure utilisation of all disks
 +        if (dontSplitRanges && boundaries.size() < dataDirectories.length)
 +            boundaries = splitter.splitOwnedRanges(dataDirectories.length, sortedLocalRanges, false);
 +
 +        List<PartitionPosition> diskBoundaries = new ArrayList<>();
 +        for (int i = 0; i < boundaries.size() - 1; i++)
 +            diskBoundaries.add(boundaries.get(i).maxKeyBound());
 +        diskBoundaries.add(partitioner.getMaximumToken().maxKeyBound());
 +        return diskBoundaries;
 +    }
 +}
diff --cc src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
index 520b08d,bf543e5..8a8362f
--- a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
+++ b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java
@@@ -609,19 -574,9 +609,19 @@@ public class LeveledManifes
          }
      };
  
 +    private static Map<SSTableReader, Bounds<Token>> genBounds(Iterable<SSTableReader> ssTableReaders)
 +    {
 +        Map<SSTableReader, Bounds<Token>> boundsMap = new HashMap<>();
 +        for (SSTableReader sstable : ssTableReaders)
 +        {
 +            boundsMap.put(sstable, new Bounds<Token>(sstable.first.getToken(), sstable.last.getToken()));
 +        }
 +        return boundsMap;
 +    }
 +
      /**
       * @return highest-priority sstables to compact for the given level.
-      * If no compactions are possible (because of concurrent compactions or because some sstables are blacklisted
+      * If no compactions are possible (because of concurrent compactions or because some sstables are excluded
       * for prior failure), will return an empty list.  Never returns null.
       */
      private Collection<SSTableReader> getCandidatesFor(int level)
diff --cc src/java/org/apache/cassandra/dht/RangeStreamer.java
index fd33d19,32e084f..a3cc996
--- a/src/java/org/apache/cassandra/dht/RangeStreamer.java
+++ b/src/java/org/apache/cassandra/dht/RangeStreamer.java
@@@ -122,24 -122,6 +122,24 @@@ public class RangeStreame
          }
      }
  
 +    /**
 +     * Source filter which only includes endpoints contained within a provided set.
 +     */
-     public static class WhitelistedSourcesFilter implements ISourceFilter
++    public static class AllowedSourcesFilter implements ISourceFilter
 +    {
-         private final Set<InetAddress> whitelistedSources;
++        private final Set<InetAddress> allowedSources;
 +
-         public WhitelistedSourcesFilter(Set<InetAddress> whitelistedSources)
++        public AllowedSourcesFilter(Set<InetAddress> allowedSources)
 +        {
-             this.whitelistedSources = whitelistedSources;
++            this.allowedSources = allowedSources;
 +        }
 +
 +        public boolean shouldInclude(InetAddress endpoint)
 +        {
-             return whitelistedSources.contains(endpoint);
++            return allowedSources.contains(endpoint);
 +        }
 +    }
 +
      public RangeStreamer(TokenMetadata metadata,
                           Collection<Token> tokens,
                           InetAddress address,
diff --cc src/java/org/apache/cassandra/service/StorageService.java
index 8466eb3,f9efdb8..3d31596
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@@ -1226,78 -1139,8 +1226,78 @@@ public class StorageService extends Not
              if (sourceDc != null)
                  streamer.addSourceFilter(new RangeStreamer.SingleDatacenterFilter(DatabaseDescriptor.getEndpointSnitch(), sourceDc));
  
 -            for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
 -                streamer.addRanges(keyspaceName, getLocalRanges(keyspaceName));
 +            if (keyspace == null)
 +            {
 +                for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
 +                    streamer.addRanges(keyspaceName, getLocalRanges(keyspaceName));
 +            }
 +            else if (tokens == null)
 +            {
 +                streamer.addRanges(keyspace, getLocalRanges(keyspace));
 +            }
 +            else
 +            {
 +                Token.TokenFactory factory = getTokenFactory();
 +                List<Range<Token>> ranges = new ArrayList<>();
 +                Pattern rangePattern = Pattern.compile("\\(\\s*(-?\\w+)\\s*,\\s*(-?\\w+)\\s*\\]");
 +                try (Scanner tokenScanner = new Scanner(tokens))
 +                {
 +                    while (tokenScanner.findInLine(rangePattern) != null)
 +                    {
 +                        MatchResult range = tokenScanner.match();
 +                        Token startToken = factory.fromString(range.group(1));
 +                        Token endToken = factory.fromString(range.group(2));
 +                        logger.info("adding range: ({},{}]", startToken, endToken);
 +                        ranges.add(new Range<>(startToken, endToken));
 +                    }
 +                    if (tokenScanner.hasNext())
 +                        throw new IllegalArgumentException("Unexpected string: " + tokenScanner.next());
 +                }
 +
 +                // Ensure all specified ranges are actually ranges owned by this host
 +                Collection<Range<Token>> localRanges = getLocalRanges(keyspace);
 +                for (Range<Token> specifiedRange : ranges)
 +                {
 +                    boolean foundParentRange = false;
 +                    for (Range<Token> localRange : localRanges)
 +                    {
 +                        if (localRange.contains(specifiedRange))
 +                        {
 +                            foundParentRange = true;
 +                            break;
 +                        }
 +                    }
 +                    if (!foundParentRange)
 +                    {
 +                        throw new IllegalArgumentException(String.format("The specified range %s is not a range that is owned by this node. Please ensure that all token ranges specified to be rebuilt belong to this node.", specifiedRange.toString()));
 +                    }
 +                }
 +
 +                if (specificSources != null)
 +                {
 +                    String[] stringHosts = specificSources.split(",");
 +                    Set<InetAddress> sources = new HashSet<>(stringHosts.length);
 +                    for (String stringHost : stringHosts)
 +                    {
 +                        try
 +                        {
 +                            InetAddress endpoint = InetAddress.getByName(stringHost);
 +                            if (FBUtilities.getBroadcastAddress().equals(endpoint))
 +                            {
 +                                throw new IllegalArgumentException("This host was specified as a source for rebuilding. Sources for a rebuild can only be other nodes in the cluster.");
 +                            }
 +                            sources.add(endpoint);
 +                        }
 +                        catch (UnknownHostException ex)
 +                        {
 +                            throw new IllegalArgumentException("Unknown host specified " + stringHost, ex);
 +                        }
 +                    }
-                     streamer.addSourceFilter(new RangeStreamer.WhitelistedSourcesFilter(sources));
++                    streamer.addSourceFilter(new RangeStreamer.AllowedSourcesFilter(sources));
 +                }
 +
 +                streamer.addRanges(keyspace, ranges);
 +            }
  
              StreamResultFuture resultFuture = streamer.fetchAsync();
              // wait for result
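
As a rough illustration only (not part of the patch): the hunk above parses the optional
token-range argument to rebuild with rangePattern before validating ownership. The standalone
sketch below (class name and sample input are hypothetical) shows how Scanner.findInLine walks
a comma-separated "(start,end]" list with that same pattern:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Scanner;
    import java.util.regex.MatchResult;
    import java.util.regex.Pattern;

    public class RebuildRangeParsingSketch
    {
        public static void main(String[] args)
        {
            // hypothetical input: two Murmur3-style ranges in "(start,end]" form
            String tokens = "(-9223372036854775808,-3074457345618258603],(3074457345618258602,9223372036854775807]";
            Pattern rangePattern = Pattern.compile("\\(\\s*(-?\\w+)\\s*,\\s*(-?\\w+)\\s*\\]");
            List<String[]> ranges = new ArrayList<>();
            try (Scanner tokenScanner = new Scanner(tokens))
            {
                while (tokenScanner.findInLine(rangePattern) != null)
                {
                    MatchResult range = tokenScanner.match();
                    // group(1) is the exclusive start token, group(2) the inclusive end token
                    ranges.add(new String[]{ range.group(1), range.group(2) });
                }
                if (tokenScanner.hasNext())
                    throw new IllegalArgumentException("Unexpected string: " + tokenScanner.next());
            }
            for (String[] r : ranges)
                System.out.println("(" + r[0] + "," + r[1] + "]");
        }
    }

Every range parsed this way must still fall inside one of the node's local ranges; that is what
the containment loop in the hunk enforces before any streaming starts.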
diff --cc test/unit/org/apache/cassandra/Util.java
index 006cd76,a49440d..fa24167
--- a/test/unit/org/apache/cassandra/Util.java
+++ b/test/unit/org/apache/cassandra/Util.java
@@@ -687,29 -646,6 +687,29 @@@ public class Uti
          {
              // Expected -- marked all directories as unwritable
          }
-         return () -> BlacklistedDirectories.clearUnwritableUnsafe();
+         return () -> DisallowedDirectories.clearUnwritableUnsafe();
      }
 +
 +    public static PagingState makeSomePagingState(ProtocolVersion protocolVersion)
 +    {
 +        return makeSomePagingState(protocolVersion, Integer.MAX_VALUE);
 +    }
 +
 +    public static PagingState makeSomePagingState(ProtocolVersion protocolVersion, int remainingInPartition)
 +    {
 +        CFMetaData metadata = CFMetaData.Builder.create("ks", "tbl")
 +                                                .addPartitionKey("k", AsciiType.instance)
 +                                                .addClusteringColumn("c1", AsciiType.instance)
 +                                                .addClusteringColumn("c2", Int32Type.instance)
 +                                                .addRegularColumn("myCol", AsciiType.instance)
 +                                                .build();
 +
 +        ByteBuffer pk = ByteBufferUtil.bytes("someKey");
 +
 +        ColumnDefinition def = metadata.getColumnDefinition(new ColumnIdentifier("myCol", false));
 +        Clustering c = Clustering.make(ByteBufferUtil.bytes("c1"), ByteBufferUtil.bytes(42));
 +        Row row = BTreeRow.singleCellRow(c, BufferCell.live(def, 0, ByteBufferUtil.EMPTY_BYTE_BUFFER));
 +        PagingState.RowMark mark = PagingState.RowMark.create(metadata, row, protocolVersion);
 +        return new PagingState(pk, mark, 10, remainingInPartition);
 +    }
  }
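
A usage note rather than part of the patch: the new Util.makeSomePagingState helper is the kind
of fixture a serialization round-trip test could use. The sketch below is only a guess at such a
test; it assumes PagingState exposes serialize(ProtocolVersion) and a matching static
deserialize(ByteBuffer, ProtocolVersion), which should be verified against the branch.

    import java.nio.ByteBuffer;

    import org.junit.BeforeClass;
    import org.junit.Test;

    import org.apache.cassandra.Util;
    import org.apache.cassandra.config.DatabaseDescriptor;
    import org.apache.cassandra.service.pager.PagingState;
    import org.apache.cassandra.transport.ProtocolVersion;

    import static org.junit.Assert.assertEquals;

    public class PagingStateRoundTripSketch
    {
        @BeforeClass
        public static void setup()
        {
            // the CFMetaData built inside makeSomePagingState needs config initialised
            DatabaseDescriptor.daemonInitialization();
        }

        @Test
        public void roundTrip()
        {
            PagingState state = Util.makeSomePagingState(ProtocolVersion.V4);
            // assumed API: serialize/deserialize overloads taking a ProtocolVersion
            ByteBuffer bytes = state.serialize(ProtocolVersion.V4);
            assertEquals(state, PagingState.deserialize(bytes, ProtocolVersion.V4));
        }
    }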
diff --cc test/unit/org/apache/cassandra/auth/jmx/AuthorizationProxyTest.java
index 84282e0,0000000..e68ef20
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/auth/jmx/AuthorizationProxyTest.java
+++ b/test/unit/org/apache/cassandra/auth/jmx/AuthorizationProxyTest.java
@@@ -1,581 -1,0 +1,581 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.auth.jmx;
 +
 +import java.util.*;
 +import java.util.concurrent.atomic.AtomicBoolean;
 +import java.util.function.Function;
 +import java.util.function.Supplier;
 +import java.util.stream.Collectors;
 +import javax.management.MalformedObjectNameException;
 +import javax.management.ObjectName;
 +import javax.security.auth.Subject;
 +
 +import com.google.common.collect.ImmutableMap;
 +import com.google.common.collect.ImmutableSet;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.auth.*;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +public class AuthorizationProxyTest
 +{
 +    @BeforeClass
 +    public static void setup() throws Exception
 +    {
 +        DatabaseDescriptor.daemonInitialization();
 +    }
 +
 +    JMXResource osBean = JMXResource.mbean("java.lang:type=OperatingSystem");
 +    JMXResource runtimeBean = JMXResource.mbean("java.lang:type=Runtime");
 +    JMXResource threadingBean = JMXResource.mbean("java.lang:type=Threading");
 +    JMXResource javaLangWildcard = JMXResource.mbean("java.lang:type=*");
 +
 +    JMXResource hintsBean = JMXResource.mbean("org.apache.cassandra.hints:type=HintsService");
 +    JMXResource batchlogBean = JMXResource.mbean("org.apache.cassandra.db:type=BatchlogManager");
 +    JMXResource customBean = JMXResource.mbean("org.apache.cassandra:type=CustomBean,property=foo");
 +    Set<ObjectName> allBeans = objectNames(osBean, runtimeBean, threadingBean, hintsBean, batchlogBean, customBean);
 +
 +    RoleResource role1 = RoleResource.role("r1");
 +
 +    @Test
 +    public void roleHasRequiredPermission() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, osBean, Permission.SELECT)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .isAuthzRequired(() -> true)
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void roleDoesNotHaveRequiredPermission() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, osBean, Permission.AUTHORIZE)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .isAuthzRequired(() -> true).build();
 +
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                    "setAttribute",
 +                                    new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void roleHasRequiredPermissionOnRootResource() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, JMXResource.root(), Permission.SELECT)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .isAuthzRequired(() -> true)
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void roleHasOtherPermissionOnRootResource() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, JMXResource.root(), Permission.AUTHORIZE)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .isAuthzRequired(() -> true)
 +                                                     .build();
 +
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                    "invoke",
 +                                    new Object[]{ objectName(osBean), "bogusMethod" }));
 +    }
 +
 +    @Test
 +    public void roleHasNoPermissions() throws Throwable
 +    {
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                                     .getPermissions((role) -> Collections.emptySet())
 +                                                     .isAuthzRequired(() -> true)
 +                                                     .build();
 +
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                    "getAttribute",
 +                                    new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void roleHasNoPermissionsButIsSuperuser() throws Throwable
 +    {
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> true)
 +                                                     .getPermissions((role) -> Collections.emptySet())
 +                                                     .isAuthzRequired(() -> true)
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void roleHasNoPermissionsButAuthzNotRequired() throws Throwable
 +    {
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                                     .getPermissions((role) -> Collections.emptySet())
 +                                                     .isAuthzRequired(() -> false)
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void authorizeWhenSubjectIsNull() throws Throwable
 +    {
 +        // a null subject indicates that the action is being performed by the
 +        // connector itself, so we always authorize it
 +        // Verify that the superuser status is never tested as the request returns early
 +        // due to the null Subject
 +        // Also, hardcode the permissions provider to return an empty set, so we can
 +        // be doubly sure that it's the null Subject which causes the authz to succeed
 +        final AtomicBoolean suStatusChecked = new AtomicBoolean(false);
 +        AuthorizationProxy proxy = new ProxyBuilder().getPermissions((role) -> Collections.emptySet())
 +                                                     .isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) ->
 +                                                                  {
 +                                                                      suStatusChecked.set(true);
 +                                                                      return false;
 +                                                                  })
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(null,
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +        assertFalse(suStatusChecked.get());
 +    }
 +
 +    @Test
 +    public void rejectWhenSubjectNotAuthenticated() throws Throwable
 +    {
 +        // Access is denied to a Subject without any associated Principals
 +        // Verify that the superuser status is never tested as the request is rejected early
 +        // due to the Subject
 +        final AtomicBoolean suStatusChecked = new AtomicBoolean(false);
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) ->
 +                                                                  {
 +                                                                      suStatusChecked.set(true);
 +                                                                      return true;
 +                                                                  })
 +                                                     .build();
 +        assertFalse(proxy.authorize(new Subject(),
 +                                    "getAttribute",
 +                                    new Object[]{ objectName(osBean), "arch" }));
 +        assertFalse(suStatusChecked.get());
 +    }
 +
 +    @Test
 +    public void authorizeWhenWildcardGrantCoversExactTarget() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, javaLangWildcard, Permission.SELECT)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void rejectWhenWildcardGrantDoesNotCoverExactTarget() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, javaLangWildcard, Permission.SELECT)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .build();
 +
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                    "getAttribute",
 +                                    new Object[]{ objectName(customBean), "arch" }));
 +    }
 +
 +    @Test
 +    public void authorizeWhenWildcardGrantCoversWildcardTarget() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, javaLangWildcard, Permission.DESCRIBE)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .queryNames(matcher(allBeans))
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "queryNames",
 +                                   new Object[]{ objectName(javaLangWildcard), null }));
 +    }
 +
 +    @Test
 +    public void rejectWhenWildcardGrantIsDisjointWithWildcardTarget() throws Throwable
 +    {
 +        JMXResource customWildcard = JMXResource.mbean("org.apache.cassandra:*");
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, customWildcard, Permission.DESCRIBE)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .queryNames(matcher(allBeans))
 +                                                     .build();
 +
 +        // the grant on org.apache.cassandra:* shouldn't permit us to invoke queryNames with java.lang:*
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                    "queryNames",
 +                                    new Object[]{ objectName(javaLangWildcard), null }));
 +    }
 +
 +    @Test
 +    public void rejectWhenWildcardGrantIntersectsWithWildcardTarget() throws Throwable
 +    {
 +        // in this test, permissions are granted on org.apache.cassandra:type=CustomBean,property=*
 +        // and all beans in the org.apache.cassandra.hints domain, but
 +        // the target of the invocation is org.apache.cassandra*:*
 +        // i.e. the subject has permissions on all CustomBeans and on the HintsService bean, but is
 +        // attempting to query all names in the org.apache.cassandra* domain. The operation should
 +        // be rejected as the permissions don't cover all known beans matching that domain, due to
 +        // the BatchLogManager bean.
 +
 +        JMXResource allCustomBeans = JMXResource.mbean("org.apache.cassandra:type=CustomBean,property=*");
 +        JMXResource allHintsBeans = JMXResource.mbean("org.apache.cassandra.hints:*");
 +        ObjectName allCassandraBeans = ObjectName.getInstance("org.apache.cassandra*:*");
 +
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, ImmutableSet.of(permission(role1, allCustomBeans, Permission.DESCRIBE),
 +                                                   permission(role1, allHintsBeans, Permission.DESCRIBE)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .queryNames(matcher(allBeans))
 +                                                     .build();
 +
 +        // the wildcard grants above shouldn't permit us to invoke queryNames with org.apache.cassandra*:*
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                    "queryNames",
 +                                    new Object[]{ allCassandraBeans, null }));
 +    }
 +
 +    @Test
 +    public void authorizeOnTargetWildcardWithPermissionOnRoot() throws Throwable
 +    {
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +            ImmutableMap.of(role1, Collections.singleton(permission(role1, JMXResource.root(), Permission.SELECT)));
 +
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(javaLangWildcard), "arch" }));
 +    }
 +
 +    @Test
 +    public void rejectInvocationOfUnknownMethod() throws Throwable
 +    {
 +        // Grant ALL permissions on the root resource, so we know that it's
 +        // the unknown method that causes the authz rejection. Of course, this
 +        // isn't foolproof but it's something.
 +        Set<PermissionDetails> allPerms = Permission.ALL.stream()
 +                                                        .map(perm -> permission(role1, JMXResource.root(), perm))
 +                                                        .collect(Collectors.toSet());
 +        Map<RoleResource, Set<PermissionDetails>> permissions = ImmutableMap.of(role1, allPerms);
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .build();
 +
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                    "unKnownMethod",
 +                                    new Object[] { ObjectName.getInstance(osBean.getObjectName()) }));
 +    }
 +
 +    @Test
-     public void rejectInvocationOfBlacklistedMethods() throws Throwable
++    public void rejectInvocationOfRestrictedMethods() throws Throwable
 +    {
 +        String[] methods = { "createMBean",
 +                             "deserialize",
 +                             "getClassLoader",
 +                             "getClassLoaderFor",
 +                             "instantiate",
 +                             "registerMBean",
 +                             "unregisterMBean" };
 +
 +        // Hardcode the superuser status check to return true, so any allowed method can be invoked.
 +        AuthorizationProxy proxy = new ProxyBuilder().isAuthzRequired(() -> true)
 +                                                     .isSuperuser((role) -> true)
 +                                                     .build();
 +
 +        for (String method : methods)
 +            // the arguments array isn't significant, so it can just be empty
 +            assertFalse(proxy.authorize(subject(role1.getRoleName()), method, new Object[0]));
 +    }
 +
 +    @Test
 +    public void authorizeMethodsWithoutMBeanArgumentIfPermissionsGranted() throws Throwable
 +    {
 +        // Certain methods on MBeanServer don't take an ObjectName as their first argument.
 +        // These methods are characterised by AuthorizationProxy as being concerned with
 +        // the MBeanServer itself, as opposed to a specific managed bean. Of these methods,
 +        // only those considered "descriptive" are allowed to be invoked by remote users.
 +        // These require the DESCRIBE permission on the root JMXResource.
 +        testNonMbeanMethods(true);
 +    }
 +
 +    @Test
 +    public void rejectMethodsWithoutMBeanArgumentIfPermissionsNotGranted() throws Throwable
 +    {
 +        testNonMbeanMethods(false);
 +    }
 +
 +    @Test
 +    public void rejectWhenAuthSetupIsNotComplete() throws Throwable
 +    {
 +        // IAuthorizer & IRoleManager should not be considered ready to use until
 +        // we know that auth setup has completed. So, even though the IAuthorizer
 +        // would theoretically grant access, the auth proxy should deny it if setup
 +        // hasn't finished.
 +
 +        Map<RoleResource, Set<PermissionDetails>> permissions =
 +        ImmutableMap.of(role1, Collections.singleton(permission(role1, osBean, Permission.SELECT)));
 +
 +        // verify that access is granted when setup is complete
 +        AuthorizationProxy proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                                     .getPermissions(permissions::get)
 +                                                     .isAuthzRequired(() -> true)
 +                                                     .isAuthSetupComplete(() -> true)
 +                                                     .build();
 +
 +        assertTrue(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +
 +        // and denied when it isn't
 +        proxy = new ProxyBuilder().isSuperuser((role) -> false)
 +                                  .getPermissions(permissions::get)
 +                                  .isAuthzRequired(() -> true)
 +                                  .isAuthSetupComplete(() -> false)
 +                                  .build();
 +
 +        assertFalse(proxy.authorize(subject(role1.getRoleName()),
 +                                   "getAttribute",
 +                                   new Object[]{ objectName(osBean), "arch" }));
 +    }
 +
 +    private void testNonMbeanMethods(boolean withPermission)
 +    {
 +        String[] methods = { "getDefaultDomain",
 +                             "getDomains",
 +                             "getMBeanCount",
 +                             "hashCode",
 +                             "queryMBeans",
 +                             "queryNames",
 +                             "toString" };
 +
 +
 +        ProxyBuilder builder = new ProxyBuilder().isAuthzRequired(() -> true).isSuperuser((role) -> false);
 +        if (withPermission)
 +        {
 +            Map<RoleResource, Set<PermissionDetails>> permissions =
 +                ImmutableMap.of(role1, ImmutableSet.of(permission(role1, JMXResource.root(), Permission.DESCRIBE)));
 +            builder.getPermissions(permissions::get);
 +        }
 +        else
 +        {
 +            builder.getPermissions((role) -> Collections.emptySet());
 +        }
 +        AuthorizationProxy proxy = builder.build();
 +
 +        for (String method : methods)
 +            assertEquals(withPermission, proxy.authorize(subject(role1.getRoleName()), method, new Object[]{ null }));
 +
-         // non-whitelisted methods should be rejected regardless.
++        // non-allowed methods should be rejected regardless.
 +        // This isn't exactly comprehensive, but it's better than nothing
 +        String[] notAllowed = { "fooMethod", "barMethod", "bazMethod" };
 +        for (String method : notAllowed)
 +            assertFalse(proxy.authorize(subject(role1.getRoleName()), method, new Object[]{ null }));
 +    }
 +
 +    // provides a simple matching function which can be substituted for the proxy's queryMBeans
 +    // utility (which by default just delegates to the MBeanServer)
 +    // This function just iterates over a supplied set of ObjectNames and filters out those
 +    // to which the target name *doesn't* apply
 +    private static Function<ObjectName, Set<ObjectName>> matcher(Set<ObjectName> allBeans)
 +    {
 +        return (target) -> allBeans.stream()
 +                                   .filter(target::apply)
 +                                   .collect(Collectors.toSet());
 +    }
 +
 +    private static PermissionDetails permission(RoleResource grantee, IResource resource, Permission permission)
 +    {
 +        return new PermissionDetails(grantee.getRoleName(), resource, permission);
 +    }
 +
 +    private static Subject subject(String roleName)
 +    {
 +        Subject subject = new Subject();
 +        subject.getPrincipals().add(new CassandraPrincipal(roleName));
 +        return subject;
 +    }
 +
 +    private static ObjectName objectName(JMXResource resource) throws MalformedObjectNameException
 +    {
 +        return ObjectName.getInstance(resource.getObjectName());
 +    }
 +
 +    private static Set<ObjectName> objectNames(JMXResource... resource)
 +    {
 +        Set<ObjectName> names = new HashSet<>();
 +        try
 +        {
 +            for (JMXResource r : resource)
 +                names.add(objectName(r));
 +        }
 +        catch (MalformedObjectNameException e)
 +        {
 +            fail("JMXResource returned invalid object name: " + e.getMessage());
 +        }
 +        return names;
 +    }
 +
 +    public static class ProxyBuilder
 +    {
 +        Function<RoleResource, Set<PermissionDetails>> getPermissions;
 +        Function<ObjectName, Set<ObjectName>> queryNames;
 +        Function<RoleResource, Boolean> isSuperuser;
 +        Supplier<Boolean> isAuthzRequired;
 +        Supplier<Boolean> isAuthSetupComplete = () -> true;
 +
 +        AuthorizationProxy build()
 +        {
 +            InjectableAuthProxy proxy = new InjectableAuthProxy();
 +
 +            if (getPermissions != null)
 +                proxy.setGetPermissions(getPermissions);
 +
 +            if (queryNames != null)
 +                proxy.setQueryNames(queryNames);
 +
 +            if (isSuperuser != null)
 +                proxy.setIsSuperuser(isSuperuser);
 +
 +            if (isAuthzRequired != null)
 +                proxy.setIsAuthzRequired(isAuthzRequired);
 +
 +            proxy.setIsAuthSetupComplete(isAuthSetupComplete);
 +
 +            return proxy;
 +        }
 +
 +        ProxyBuilder getPermissions(Function<RoleResource, Set<PermissionDetails>> f)
 +        {
 +            getPermissions = f;
 +            return this;
 +        }
 +
 +        ProxyBuilder queryNames(Function<ObjectName, Set<ObjectName>> f)
 +        {
 +            queryNames = f;
 +            return this;
 +        }
 +
 +        ProxyBuilder isSuperuser(Function<RoleResource, Boolean> f)
 +        {
 +            isSuperuser = f;
 +            return this;
 +        }
 +
 +        ProxyBuilder isAuthzRequired(Supplier<Boolean> s)
 +        {
 +            isAuthzRequired = s;
 +            return this;
 +        }
 +
 +        ProxyBuilder isAuthSetupComplete(Supplier<Boolean> s)
 +        {
 +            isAuthSetupComplete = s;
 +            return this;
 +        }
 +
 +        private static class InjectableAuthProxy extends AuthorizationProxy
 +        {
 +            void setGetPermissions(Function<RoleResource, Set<PermissionDetails>> f)
 +            {
 +                this.getPermissions = f;
 +            }
 +
 +            void setQueryNames(Function<ObjectName, Set<ObjectName>> f)
 +            {
 +                this.queryNames = f;
 +            }
 +
 +            void setIsSuperuser(Function<RoleResource, Boolean> f)
 +            {
 +                this.isSuperuser = f;
 +            }
 +
 +            void setIsAuthzRequired(Supplier<Boolean> s)
 +            {
 +                this.isAuthzRequired = s;
 +            }
 +
 +            void setIsAuthSetupComplete(Supplier<Boolean> s)
 +            {
 +                this.isAuthSetupComplete = s;
 +            }
 +        }
 +    }
 +}
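
An aside, not part of the patch: the wildcard grant tests above rest on standard JMX ObjectName
pattern matching, which is what the matcher() helper delegates to via ObjectName.apply. A minimal
sketch using the same bean names as the test class:

    import javax.management.ObjectName;

    public class ObjectNameMatchingSketch
    {
        public static void main(String[] args) throws Exception
        {
            ObjectName javaLangWildcard = ObjectName.getInstance("java.lang:type=*");
            ObjectName runtimeBean      = ObjectName.getInstance("java.lang:type=Runtime");
            ObjectName customBean       = ObjectName.getInstance("org.apache.cassandra:type=CustomBean,property=foo");

            // a pattern applies to a concrete name when the domain and key properties match
            System.out.println(javaLangWildcard.apply(runtimeBean)); // true: the wildcard covers the exact target
            System.out.println(javaLangWildcard.apply(customBean));  // false: different domain, not covered
        }
    }

This is why a grant on java.lang:type=* authorizes getAttribute on the OperatingSystem bean in
authorizeWhenWildcardGrantCoversExactTarget, while the CustomBean target is rejected in
rejectWhenWildcardGrantDoesNotCoverExactTarget.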
diff --cc test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
index 5231e17,0000000..febcfeb
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
@@@ -1,127 -1,0 +1,127 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.db;
 +
 +import java.io.File;
 +import java.net.InetAddress;
 +import java.net.UnknownHostException;
 +import java.util.List;
 +
 +import com.google.common.collect.Lists;
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.cql3.CQLTester;
 +import org.apache.cassandra.dht.BootStrapper;
 +import org.apache.cassandra.locator.TokenMetadata;
 +import org.apache.cassandra.service.StorageService;
 +import org.apache.cassandra.utils.FBUtilities;
 +
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotSame;
 +import static org.junit.Assert.assertSame;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +public class DiskBoundaryManagerTest extends CQLTester
 +{
 +    private DiskBoundaryManager dbm;
 +    private MockCFS mock;
 +    private Directories dirs;
 +
 +    @Before
 +    public void setup()
 +    {
-         BlacklistedDirectories.clearUnwritableUnsafe();
++        DisallowedDirectories.clearUnwritableUnsafe();
 +        TokenMetadata metadata = StorageService.instance.getTokenMetadata();
 +        metadata.updateNormalTokens(BootStrapper.getRandomTokens(metadata, 10), FBUtilities.getBroadcastAddress());
 +        createTable("create table %s (id int primary key, x text)");
 +        dirs = new Directories(getCurrentColumnFamilyStore().metadata, Lists.newArrayList(new Directories.DataDirectory(new File("/tmp/1")),
 +                                                                                          new Directories.DataDirectory(new File("/tmp/2")),
 +                                                                                          new Directories.DataDirectory(new File("/tmp/3"))));
 +        mock = new MockCFS(getCurrentColumnFamilyStore(), dirs);
 +        dbm = mock.diskBoundaryManager;
 +    }
 +
 +    @Test
 +    public void getBoundariesTest()
 +    {
 +        DiskBoundaries dbv = dbm.getDiskBoundaries(mock);
 +        Assert.assertEquals(3, dbv.positions.size());
 +        assertEquals(dbv.directories, dirs.getWriteableLocations());
 +    }
 +
 +    @Test
-     public void blackListTest()
++    public void disallowedDirectoriesTest()
 +    {
 +        DiskBoundaries dbv = dbm.getDiskBoundaries(mock);
 +        Assert.assertEquals(3, dbv.positions.size());
 +        assertEquals(dbv.directories, dirs.getWriteableLocations());
-         BlacklistedDirectories.maybeMarkUnwritable(new File("/tmp/3"));
++        DisallowedDirectories.maybeMarkUnwritable(new File("/tmp/3"));
 +        dbv = dbm.getDiskBoundaries(mock);
 +        Assert.assertEquals(2, dbv.positions.size());
 +        Assert.assertEquals(Lists.newArrayList(new Directories.DataDirectory(new File("/tmp/1")),
 +                                        new Directories.DataDirectory(new File("/tmp/2"))),
 +                                 dbv.directories);
 +    }
 +
 +    @Test
 +    public void updateTokensTest() throws UnknownHostException
 +    {
 +        DiskBoundaries dbv1 = dbm.getDiskBoundaries(mock);
 +        StorageService.instance.getTokenMetadata().updateNormalTokens(BootStrapper.getRandomTokens(StorageService.instance.getTokenMetadata(), 10), InetAddress.getByName("127.0.0.10"));
 +        DiskBoundaries dbv2 = dbm.getDiskBoundaries(mock);
 +        assertFalse(dbv1.equals(dbv2));
 +    }
 +
 +    @Test
 +    public void alterKeyspaceTest() throws Throwable
 +    {
 +        // do not use mock since it will not be invalidated after alter keyspace
 +        DiskBoundaryManager dbm = getCurrentColumnFamilyStore().diskBoundaryManager;
 +        DiskBoundaries dbv1 = dbm.getDiskBoundaries(mock);
 +        execute("alter keyspace "+keyspace()+" with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 }");
 +        DiskBoundaries dbv2 = dbm.getDiskBoundaries(mock);
 +        assertNotSame(dbv1, dbv2);
 +        DiskBoundaries dbv3 = dbm.getDiskBoundaries(mock);
 +        assertSame(dbv2, dbv3);
 +
 +    }
 +
 +    private static void assertEquals(List<Directories.DataDirectory> dir1, Directories.DataDirectory[] dir2)
 +    {
 +        if (dir1.size() != dir2.length)
 +            fail();
 +        for (int i = 0; i < dir2.length; i++)
 +        {
 +            if (!dir1.get(i).equals(dir2[i]))
 +                fail();
 +        }
 +    }
 +
 +    // just to be able to override the data directories
 +    private static class MockCFS extends ColumnFamilyStore
 +    {
 +        MockCFS(ColumnFamilyStore cfs, Directories dirs)
 +        {
 +            super(cfs.keyspace, cfs.getTableName(), 0, cfs.metadata, dirs, false, false, true);
 +        }
 +    }
 +}

