http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterState.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterState.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterState.java
deleted file mode 100644
index 3a468ae..0000000
--- a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterState.java
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.noggit.JSONWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-/**
- * Immutable state of the cloud. Normally you can get the state by using
- * {@link ZkStateReader#getClusterState()}.
- * @lucene.experimental
- */
-public class ClusterState implements JSONWriter.Writable {
-  private static Logger log = LoggerFactory.getLogger(ClusterState.class);
-  
-  private final Integer znodeVersion;
-  
-  private final Map<String, CollectionRef> collectionStates;
-  private Set<String> liveNodes;
-
-  /**
-   * Use this constr when ClusterState is meant for consumption.
-   */
-  public ClusterState(Integer znodeVersion, Set<String> liveNodes,
-      Map<String, DocCollection> collectionStates) {
-    this(liveNodes, getRefMap(collectionStates),znodeVersion);
-  }
-
-  private static Map<String, CollectionRef> getRefMap(Map<String, 
DocCollection> collectionStates) {
-    Map<String, CollectionRef> collRefs =  new 
LinkedHashMap<>(collectionStates.size());
-    for (Entry<String, DocCollection> entry : collectionStates.entrySet()) {
-      final DocCollection c = entry.getValue();
-      collRefs.put(entry.getKey(), new CollectionRef(c));
-    }
-    return collRefs;
-  }
-
-  /**Use this if all the collection states are not readily available and some 
needs to be lazily loaded
-   */
-  public ClusterState(Set<String> liveNodes, Map<String, CollectionRef> 
collectionStates, Integer znodeVersion){
-    this.znodeVersion = znodeVersion;
-    this.liveNodes = new HashSet<>(liveNodes.size());
-    this.liveNodes.addAll(liveNodes);
-    this.collectionStates = new LinkedHashMap<>(collectionStates);
-  }
-
-
-  /**
-   * Returns a new cluster state object modified with the given collection.
-   *
-   * @param collectionName the name of the modified (or deleted) collection
-   * @param collection     the collection object. A null value deletes the 
collection from the state
-   * @return the updated cluster state which preserves the current live nodes 
and zk node version
-   */
-  public ClusterState copyWith(String collectionName, DocCollection 
collection) {
-    ClusterState result = new ClusterState(liveNodes, new 
LinkedHashMap<>(collectionStates), znodeVersion);
-    if (collection == null) {
-      result.collectionStates.remove(collectionName);
-    } else {
-      result.collectionStates.put(collectionName, new 
CollectionRef(collection));
-    }
-    return result;
-  }
-
-
-  /**
-   * Get the lead replica for specific collection, or null if one currently 
doesn't exist.
-   */
-  public Replica getLeader(String collection, String sliceName) {
-    DocCollection coll = getCollectionOrNull(collection);
-    if (coll == null) return null;
-    Slice slice = coll.getSlice(sliceName);
-    if (slice == null) return null;
-    return slice.getLeader();
-  }
-  private Replica getReplica(DocCollection coll, String replicaName) {
-    if (coll == null) return null;
-    for (Slice slice : coll.getSlices()) {
-      Replica replica = slice.getReplica(replicaName);
-      if (replica != null) return replica;
-    }
-    return null;
-  }
-
-  public boolean hasCollection(String coll) {
-    return  collectionStates.containsKey(coll) ;
-  }
-
-  /**
-   * Gets the replica by the core name (assuming the slice is unknown) or null 
if replica is not found.
-   * If the slice is known, do not use this method.
-   * coreNodeName is the same as replicaName
-   */
-  public Replica getReplica(final String collection, final String 
coreNodeName) {
-    return getReplica(getCollectionOrNull(collection), coreNodeName);
-  }
-
-  /**
-   * Get the named Slice for collection, or null if not found.
-   */
-  public Slice getSlice(String collection, String sliceName) {
-    DocCollection coll = getCollectionOrNull(collection);
-    if (coll == null) return null;
-    return coll.getSlice(sliceName);
-  }
-
-  public Map<String, Slice> getSlicesMap(String collection) {
-    DocCollection coll = getCollectionOrNull(collection);
-    if (coll == null) return null;
-    return coll.getSlicesMap();
-  }
-  
-  public Map<String, Slice> getActiveSlicesMap(String collection) {
-    DocCollection coll = getCollectionOrNull(collection);
-    if (coll == null) return null;
-    return coll.getActiveSlicesMap();
-  }
-
-  public Collection<Slice> getSlices(String collection) {
-    DocCollection coll = getCollectionOrNull(collection);
-    if (coll == null) return null;
-    return coll.getSlices();
-  }
-
-  public Collection<Slice> getActiveSlices(String collection) {
-    DocCollection coll = getCollectionOrNull(collection);
-    if (coll == null) return null;
-    return coll.getActiveSlices();
-  }
-
-
-  /**
-   * Get the named DocCollection object, or throw an exception if it doesn't 
exist.
-   */
-  public DocCollection getCollection(String collection) {
-    DocCollection coll = getCollectionOrNull(collection);
-    if (coll == null) throw new SolrException(ErrorCode.BAD_REQUEST, "Could 
not find collection : " + collection);
-    return coll;
-  }
-
-  public CollectionRef getCollectionRef(String coll) {
-    return  collectionStates.get(coll);
-  }
-
-  public DocCollection getCollectionOrNull(String coll) {
-    CollectionRef ref = collectionStates.get(coll);
-    return ref == null? null:ref.get();
-  }
-
-  /**
-   * Get collection names.
-   */
-  public Set<String> getCollections() {
-    return collectionStates.keySet();
-  }
-
-
-  /**
-   * Get names of the currently live nodes.
-   */
-  public Set<String> getLiveNodes() {
-    return Collections.unmodifiableSet(liveNodes);
-  }
-
-  public String getShardId(String nodeName, String coreName) {
-    return getShardId(null, nodeName, coreName);
-  }
-
-  public String getShardId(String collectionName, String nodeName, String 
coreName) {
-    Collection<CollectionRef> states = collectionStates.values();
-    if (collectionName != null) {
-      CollectionRef c = collectionStates.get(collectionName);
-      if (c != null) states = Collections.singletonList( c );
-    }
-
-    for (CollectionRef ref : states) {
-      DocCollection coll = ref.get();
-      if(coll == null) continue;// this collection go tremoved in between, skip
-      for (Slice slice : coll.getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          // TODO: for really large clusters, we could 'index' on this
-          String rnodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
-          String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-          if (nodeName.equals(rnodeName) && coreName.equals(rcore)) {
-            return slice.getName();
-          }
-        }
-      }
-    }
-    return null;
-  }
-  
-  /**
-   * Check if node is alive. 
-   */
-  public boolean liveNodesContain(String name) {
-    return liveNodes.contains(name);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("live nodes:" + liveNodes);
-    sb.append(" collections:" + collectionStates);
-    return sb.toString();
-  }
-
-  public static ClusterState load(Integer version, byte[] bytes, Set<String> 
liveNodes) {
-    return load(version, bytes, liveNodes, ZkStateReader.CLUSTER_STATE);
-  }
-  /**
-   * Create ClusterState from json string that is typically stored in 
zookeeper.
-   * 
-   * @param version zk version of the clusterstate.json file (bytes)
-   * @param bytes clusterstate.json as a byte array
-   * @param liveNodes list of live nodes
-   * @return the ClusterState
-   */
-  public static ClusterState load(Integer version, byte[] bytes, Set<String> 
liveNodes, String znode) {
-    // System.out.println("######## ClusterState.load:" + (bytes==null ? null 
: new String(bytes)));
-    if (bytes == null || bytes.length == 0) {
-      return new ClusterState(version, liveNodes, Collections.<String, 
DocCollection>emptyMap());
-    }
-    Map<String, Object> stateMap = (Map<String, Object>) 
ZkStateReader.fromJSON(bytes);
-    Map<String,CollectionRef> collections = new 
LinkedHashMap<>(stateMap.size());
-    for (Entry<String, Object> entry : stateMap.entrySet()) {
-      String collectionName = entry.getKey();
-      DocCollection coll = collectionFromObjects(collectionName, 
(Map<String,Object>)entry.getValue(), version, znode);
-      collections.put(collectionName, new CollectionRef(coll));
-    }
-
-    return new ClusterState( liveNodes, collections,version);
-  }
-
-
-  public static Aliases load(byte[] bytes) {
-    if (bytes == null || bytes.length == 0) {
-      return new Aliases();
-    }
-    Map<String,Map<String,String>> aliasMap = (Map<String,Map<String,String>>) 
ZkStateReader.fromJSON(bytes);
-
-    return new Aliases(aliasMap);
-  }
-
-  private static DocCollection collectionFromObjects(String name, Map<String, 
Object> objs, Integer version, String znode) {
-    Map<String,Object> props;
-    Map<String,Slice> slices;
-
-    Map<String,Object> sliceObjs = 
(Map<String,Object>)objs.get(DocCollection.SHARDS);
-    if (sliceObjs == null) {
-      // legacy format from 4.0... there was no separate "shards" level to 
contain the collection shards.
-      slices = makeSlices(objs);
-      props = Collections.emptyMap();
-    } else {
-      slices = makeSlices(sliceObjs);
-      props = new HashMap<>(objs);
-      objs.remove(DocCollection.SHARDS);
-    }
-
-    Object routerObj = props.get(DocCollection.DOC_ROUTER);
-    DocRouter router;
-    if (routerObj == null) {
-      router = DocRouter.DEFAULT;
-    } else if (routerObj instanceof String) {
-      // back compat with Solr4.4
-      router = DocRouter.getDocRouter((String)routerObj);
-    } else {
-      Map routerProps = (Map)routerObj;
-      router = DocRouter.getDocRouter((String) routerProps.get("name"));
-    }
-
-    return new DocCollection(name, slices, props, router, version, znode);
-  }
-
-  private static Map<String,Slice> makeSlices(Map<String,Object> 
genericSlices) {
-    if (genericSlices == null) return Collections.emptyMap();
-    Map<String,Slice> result = new LinkedHashMap<>(genericSlices.size());
-    for (Map.Entry<String,Object> entry : genericSlices.entrySet()) {
-      String name = entry.getKey();
-      Object val = entry.getValue();
-      if (val instanceof Slice) {
-        result.put(name, (Slice)val);
-      } else if (val instanceof Map) {
-        result.put(name, new Slice(name, null, (Map<String,Object>)val));
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public void write(JSONWriter jsonWriter) {
-    LinkedHashMap<String , DocCollection> map = new LinkedHashMap<>();
-    for (Entry<String, CollectionRef> e : collectionStates.entrySet()) {
-      // using this class check to avoid fetching from ZK in case of lazily 
loaded collection
-      if (e.getValue().getClass() == CollectionRef.class) {
-        // check if it is a lazily loaded collection outside of 
clusterstate.json
-        DocCollection coll = e.getValue().get();
-        if (coll.getStateFormat() == 1) {
-          map.put(coll.getName(),coll);
-        }
-      }
-    }
-    jsonWriter.write(map);
-  }
-
-  /**
-   * The version of clusterstate.json in ZooKeeper.
-   * 
-   * @return null if ClusterState was created for publication, not consumption
-   */
-  public Integer getZkClusterStateVersion() {
-    return znodeVersion;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result
-        + ((znodeVersion == null) ? 0 : znodeVersion.hashCode());
-    result = prime * result + ((liveNodes == null) ? 0 : liveNodes.hashCode());
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    ClusterState other = (ClusterState) obj;
-    if (znodeVersion == null) {
-      if (other.znodeVersion != null) return false;
-    } else if (!znodeVersion.equals(other.znodeVersion)) return false;
-    if (liveNodes == null) {
-      if (other.liveNodes != null) return false;
-    } else if (!liveNodes.equals(other.liveNodes)) return false;
-    return true;
-  }
-
-
-
-  /**
-   * Internal API used only by ZkStateReader
-   */
-  void setLiveNodes(Set<String> liveNodes){
-    this.liveNodes = liveNodes;
-  }
-
-  /**For internal use only
-   */
-  Map<String, CollectionRef> getCollectionStates() {
-    return collectionStates;
-  }
-
-  public static class CollectionRef {
-    private final DocCollection coll;
-
-    public CollectionRef(DocCollection coll) {
-      this.coll = coll;
-    }
-
-    public DocCollection get(){
-      return coll;
-    }
-
-    public boolean isLazilyLoaded() { return false; }
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterStateUtil.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterStateUtil.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterStateUtil.java
deleted file mode 100644
index 0cc98b1..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ClusterStateUtil.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ClusterStateUtil {
-  private static Logger log = LoggerFactory.getLogger(ClusterStateUtil.class);
-  
-  private static final int TIMEOUT_POLL_MS = 1000;
-  
-  /**
-   * Wait to see *all* cores live and active.
-   * 
-   * @param zkStateReader
-   *          to use for ClusterState
-   * @param timeoutInMs
-   *          how long to wait before giving up
-   * @return false if timed out
-   */
-  public static boolean waitForAllActiveAndLive(ZkStateReader zkStateReader, 
int timeoutInMs) {
-    return waitForAllActiveAndLive(zkStateReader, null, timeoutInMs);
-  }
-  
-  /**
-   * Wait to see *all* cores live and active.
-   * 
-   * @param zkStateReader
-   *          to use for ClusterState
-   * @param collection to look at
-   * @param timeoutInMs
-   *          how long to wait before giving up
-   * @return false if timed out
-   */
-  public static boolean waitForAllActiveAndLive(ZkStateReader zkStateReader, 
String collection,
-      int timeoutInMs) {
-    long timeout = System.nanoTime()
-        + TimeUnit.NANOSECONDS.convert(timeoutInMs, TimeUnit.MILLISECONDS);
-    boolean success = false;
-    while (System.nanoTime() < timeout) {
-      success = true;
-      ClusterState clusterState = zkStateReader.getClusterState();
-      if (clusterState != null) {
-        Set<String> collections;
-        if (collection != null) {
-          collections = Collections.singleton(collection);
-        } else {
-          collections = clusterState.getCollections();
-        }
-        for (String coll : collections) {
-          DocCollection docCollection = clusterState.getCollection(coll);
-          Collection<Slice> slices = docCollection.getSlices();
-          for (Slice slice : slices) {
-            // only look at active shards
-            if (slice.getState().equals(Slice.ACTIVE)) {
-              Collection<Replica> replicas = slice.getReplicas();
-              for (Replica replica : replicas) {
-                // on a live node?
-                boolean live = clusterState.liveNodesContain(replica
-                    .getNodeName());
-                String state = replica.getStr(ZkStateReader.STATE_PROP);
-                if (!live || !state.equals(ZkStateReader.ACTIVE)) {
-                  // fail
-                  success = false;
-                }
-              }
-            }
-          }
-        }
-        if (!success) {
-          try {
-            Thread.sleep(TIMEOUT_POLL_MS);
-          } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted");
-          }
-        }
-      }
-    }
-    
-    return success;
-  }
-  
-  /**
-   * Wait to see an entry in the ClusterState with a specific coreNodeName and
-   * baseUrl.
-   * 
-   * @param zkStateReader
-   *          to use for ClusterState
-   * @param collection
-   *          to look in
-   * @param coreNodeName
-   *          to wait for
-   * @param baseUrl
-   *          to wait for
-   * @param timeoutInMs
-   *          how long to wait before giving up
-   * @return false if timed out
-   */
-  public static boolean waitToSeeLive(ZkStateReader zkStateReader,
-      String collection, String coreNodeName, String baseUrl,
-      int timeoutInMs) {
-    long timeout = System.nanoTime()
-        + TimeUnit.NANOSECONDS.convert(timeoutInMs, TimeUnit.MILLISECONDS);
-    
-    while (System.nanoTime() < timeout) {
-      log.debug("waiting to see replica just created live collection={} 
replica={} baseUrl={}",
-          collection, coreNodeName, baseUrl);
-      ClusterState clusterState = zkStateReader.getClusterState();
-      if (clusterState != null) {
-        DocCollection docCollection = clusterState.getCollection(collection);
-        Collection<Slice> slices = docCollection.getSlices();
-        for (Slice slice : slices) {
-          // only look at active shards
-          if (slice.getState().equals(Slice.ACTIVE)) {
-            Collection<Replica> replicas = slice.getReplicas();
-            for (Replica replica : replicas) {
-              // on a live node?
-              boolean live = 
clusterState.liveNodesContain(replica.getNodeName());
-              String rcoreNodeName = replica.getName();
-              String rbaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
-              if (live && coreNodeName.equals(rcoreNodeName)
-                  && baseUrl.equals(rbaseUrl)) {
-                // found it
-                return true;
-              }
-            }
-          }
-        }
-        try {
-          Thread.sleep(TIMEOUT_POLL_MS);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted");
-        }
-      }
-    }
-    
-    log.error("Timed out waiting to see replica just created in cluster state. 
Continuing...");
-    return false;
-  }
-  
-  public static boolean waitForAllNotLive(ZkStateReader zkStateReader, int 
timeoutInMs) {
-    return waitForAllNotLive(zkStateReader, null, timeoutInMs);
-  }
-  
-
-  public static boolean waitForAllNotLive(ZkStateReader zkStateReader,
-      String collection, int timeoutInMs) {
-    long timeout = System.nanoTime()
-        + TimeUnit.NANOSECONDS.convert(timeoutInMs, TimeUnit.MILLISECONDS);
-    boolean success = false;
-    while (System.nanoTime() < timeout) {
-      success = true;
-      ClusterState clusterState = zkStateReader.getClusterState();
-      if (clusterState != null) {
-        Set<String> collections;
-        if (collection == null) {
-          collections = clusterState.getCollections();
-        } else {
-          collections = Collections.singleton(collection);
-        }
-        for (String coll : collections) {
-          DocCollection docCollection = clusterState.getCollection(coll);
-          Collection<Slice> slices = docCollection.getSlices();
-          for (Slice slice : slices) {
-            // only look at active shards
-            if (slice.getState().equals(Slice.ACTIVE)) {
-              Collection<Replica> replicas = slice.getReplicas();
-              for (Replica replica : replicas) {
-                // on a live node?
-                boolean live = clusterState.liveNodesContain(replica
-                    .getNodeName());
-                if (live) {
-                  // fail
-                  success = false;
-                }
-              }
-            }
-          }
-        }
-        if (!success) {
-          try {
-            Thread.sleep(TIMEOUT_POLL_MS);
-          } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted");
-          }
-        }
-      }
-    }
-    
-    return success;
-  }
-  
-  public static boolean isAutoAddReplicas(ZkStateReader reader, String 
collection) {
-    ClusterState clusterState = reader.getClusterState();
-    if (clusterState != null) {
-      DocCollection docCollection = 
clusterState.getCollectionOrNull(collection);
-      if (docCollection != null) {
-        return docCollection.getAutoAddReplicas();
-      }
-    }
-    return false;
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/CompositeIdRouter.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/CompositeIdRouter.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/CompositeIdRouter.java
deleted file mode 100644
index 668519c..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/CompositeIdRouter.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.Hash;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-//
-// user!uniqueid
-// app!user!uniqueid
-// user/4!uniqueid
-// app/2!user/4!uniqueid
-//
-public class CompositeIdRouter extends HashBasedRouter {
-  public static final String NAME = "compositeId";
-
-  public static final String SEPARATOR = "!";
-
-  // separator used to optionally specify number of bits to allocate toward 
first part.
-  public static final int bitsSeparator = '/';
-  private int bits = 16;
-
-  @Override
-  public int sliceHash(String id, SolrInputDocument doc, SolrParams params, 
DocCollection collection) {
-    String shardFieldName = getRouteField(collection);
-    if (shardFieldName != null && doc != null) {
-      Object o = doc.getFieldValue(shardFieldName);
-      if (o == null)
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No value 
for :" + shardFieldName + ". Unable to identify shard");
-      id = o.toString();
-    }
-    if (id.indexOf(SEPARATOR) < 0) {
-      return Hash.murmurhash3_x86_32(id, 0, id.length(), 0);
-    }
-
-    return new KeyParser(id).getHash();
-  }
-
-
-  /**
-   * Get Range for a given CompositeId based route key
-   *
-   * @param routeKey to return Range for
-   * @return Range for given routeKey
-   */
-  public Range keyHashRange(String routeKey) {
-    if (routeKey.indexOf(SEPARATOR) < 0) {
-      int hash = sliceHash(routeKey, null, null, null);
-      return new Range(hash, hash);
-    }
-
-    return new KeyParser(routeKey).getRange();
-  }
-
-  @Override
-  public Collection<Slice> getSearchSlicesSingle(String shardKey, SolrParams 
params, DocCollection collection) {
-    if (shardKey == null) {
-      // search across whole collection
-      // TODO: this may need modification in the future when shard splitting 
could cause an overlap
-      return collection.getActiveSlices();
-    }
-    String id = shardKey;
-
-    if (shardKey.indexOf(SEPARATOR) < 0) {
-      // shardKey is a simple id, so don't do a range
-      return Collections.singletonList(hashToSlice(Hash.murmurhash3_x86_32(id, 
0, id.length(), 0), collection));
-    }
-
-    Range completeRange = new KeyParser(id).getRange();
-
-    List<Slice> targetSlices = new ArrayList<>(1);
-    for (Slice slice : collection.getActiveSlices()) {
-      Range range = slice.getRange();
-      if (range != null && range.overlaps(completeRange)) {
-        targetSlices.add(slice);
-      }
-    }
-
-    return targetSlices;
-  }
-
-  public List<Range> partitionRangeByKey(String key, Range range) {
-    List<Range> result = new ArrayList<>(3);
-    Range keyRange = keyHashRange(key);
-    if (!keyRange.overlaps(range)) {
-      throw new IllegalArgumentException("Key range does not overlap given 
range");
-    }
-    if (keyRange.equals(range)) {
-      return Collections.singletonList(keyRange);
-    } else if (keyRange.isSubsetOf(range)) {
-      result.add(new Range(range.min, keyRange.min - 1));
-      result.add(keyRange);
-      result.add((new Range(keyRange.max + 1, range.max)));
-    } else if (range.includes(keyRange.max)) {
-      result.add(new Range(range.min, keyRange.max));
-      result.add(new Range(keyRange.max + 1, range.max));
-    } else {
-      result.add(new Range(range.min, keyRange.min - 1));
-      result.add(new Range(keyRange.min, range.max));
-    }
-    return result;
-  }
-
-  @Override
-  public List<Range> partitionRange(int partitions, Range range) {
-    int min = range.min;
-    int max = range.max;
-
-    assert max >= min;
-    if (partitions == 0) return Collections.EMPTY_LIST;
-    long rangeSize = (long) max - (long) min;
-    long rangeStep = Math.max(1, rangeSize / partitions);
-
-    List<Range> ranges = new ArrayList<>(partitions);
-
-    long start = min;
-    long end = start;
-
-    // keep track of the idealized target to avoid accumulating rounding errors
-    long targetStart = min;
-    long targetEnd = targetStart;
-
-    // Round to avoid splitting hash domains across ranges if such rounding is 
not significant.
-    // With default bits==16, one would need to create more than 4000 shards 
before this
-    // becomes false by default.
-    int mask = 0x0000ffff;
-    boolean round = rangeStep >= (1 << bits) * 16;
-
-    while (end < max) {
-      targetEnd = targetStart + rangeStep;
-      end = targetEnd;
-
-      if (round && ((end & mask) != mask)) {
-        // round up or down?
-        int increment = 1 << bits;  // 0x00010000
-        long roundDown = (end | mask) - increment;
-        long roundUp = (end | mask) + increment;
-        if (end - roundDown < roundUp - end && roundDown > start) {
-          end = roundDown;
-        } else {
-          end = roundUp;
-        }
-      }
-
-      // make last range always end exactly on MAX_VALUE
-      if (ranges.size() == partitions - 1) {
-        end = max;
-      }
-      ranges.add(new Range((int) start, (int) end));
-      start = end + 1L;
-      targetStart = targetEnd + 1L;
-    }
-
-    return ranges;
-  }
-
-  /**
-   * Helper class to calculate parts, masks etc for an id.
-   */
-  static class KeyParser {
-    String key;
-    int[] numBits;
-    int[] hashes;
-    int[] masks;
-    boolean triLevel;
-    int pieces;
-
-    public KeyParser(final String key) {
-      this.key = key;
-      List<String> partsList = new ArrayList<>(3);
-      int firstSeparatorPos = key.indexOf(SEPARATOR);
-      if (-1 == firstSeparatorPos) {
-        partsList.add(key);
-      } else {
-        partsList.add(key.substring(0, firstSeparatorPos));
-        int lastPos = key.length() - 1;
-        // Don't make any more parts if the first separator is the last char
-        if (firstSeparatorPos < lastPos) {
-          int secondSeparatorPos = key.indexOf(SEPARATOR, firstSeparatorPos + 
1);
-          if (-1 == secondSeparatorPos) {
-            partsList.add(key.substring(firstSeparatorPos + 1));
-          } else if (secondSeparatorPos == lastPos) {
-            // Don't make any more parts if the key has exactly two separators 
and 
-            // they're the last two chars - back-compatibility with the 
behavior of
-            // String.split() - see SOLR-6257.
-            if (firstSeparatorPos < secondSeparatorPos - 1) {
-              partsList.add(key.substring(firstSeparatorPos + 1, 
secondSeparatorPos));
-            }
-          } else { // The second separator is not the last char
-            partsList.add(key.substring(firstSeparatorPos + 1, 
secondSeparatorPos));
-            partsList.add(key.substring(secondSeparatorPos + 1));
-          }
-          // Ignore any further separators beyond the first two
-        }
-      }
-      pieces = partsList.size();
-      String[] parts = partsList.toArray(new String[pieces]);
-      numBits = new int[2];
-      if (key.endsWith("!") && pieces < 3)
-        pieces++;
-      hashes = new int[pieces];
-
-      if (pieces == 3) {
-        numBits[0] = 8;
-        numBits[1] = 8;
-        triLevel = true;
-      } else {
-        numBits[0] = 16;
-        triLevel = false;
-      }
-
-      for (int i = 0; i < pieces; i++) {
-        if (i < pieces - 1) {
-          int commaIdx = parts[i].indexOf(bitsSeparator);
-
-          if (commaIdx > 0) {
-            numBits[i] = getNumBits(parts[i], commaIdx);
-            parts[i] = parts[i].substring(0, commaIdx);
-          }
-        }
-        //Last component of an ID that ends with a '!'
-        if(i >= parts.length)
-          hashes[i] = Hash.murmurhash3_x86_32("", 0, "".length(), 0);
-        else
-          hashes[i] = Hash.murmurhash3_x86_32(parts[i], 0, parts[i].length(), 
0);
-      }
-      masks = getMasks();
-    }
-
-    Range getRange() {
-      int lowerBound;
-      int upperBound;
-
-      if (triLevel) {
-        lowerBound = hashes[0] & masks[0] | hashes[1] & masks[1];
-        upperBound = lowerBound | masks[2];
-      } else {
-        lowerBound = hashes[0] & masks[0];
-        upperBound = lowerBound | masks[1];
-      }
-      //  If the upper bits are 0xF0000000, the range we want to cover is
-      //  0xF0000000 0xFfffffff
-
-      if ((masks[0] == 0 && !triLevel) || (masks[0] == 0 && masks[1] == 0 && 
triLevel)) {
-        // no bits used from first part of key.. the code above will produce 
0x000000000->0xffffffff
-        // which only works on unsigned space, but we're using signed space.
-        lowerBound = Integer.MIN_VALUE;
-        upperBound = Integer.MAX_VALUE;
-      }
-      Range r = new Range(lowerBound, upperBound);
-      return r;
-    }
-
-    /**
-     * Get bit masks for routing based on routing level
-     */
-    private int[] getMasks() {
-      int[] masks;
-      if (triLevel)
-        masks = getBitMasks(numBits[0], numBits[1]);
-      else
-        masks = getBitMasks(numBits[0]);
-
-      return masks;
-    }
-
-    private int[] getBitMasks(int firstBits, int secondBits) {
-      // java can't shift 32 bits
-      int[] masks = new int[3];
-      masks[0] = firstBits == 0 ? 0 : (-1 << (32 - firstBits));
-      masks[1] = (firstBits + secondBits) == 0 ? 0 : (-1 << (32 - firstBits - 
secondBits));
-      masks[1] = masks[0] ^ masks[1];
-      masks[2] = (firstBits + secondBits) == 32 ? 0 : ~(masks[0] | masks[1]);
-      return masks;
-    }
-
-    private int getNumBits(String firstPart, int commaIdx) {
-      int v = 0;
-      for (int idx = commaIdx + 1; idx < firstPart.length(); idx++) {
-        char ch = firstPart.charAt(idx);
-        if (ch < '0' || ch > '9') return -1;
-        v = v * 10 + (ch - '0');
-      }
-      return v > 32 ? -1 : v;
-    }
-
-    private int[] getBitMasks(int firstBits) {
-      // java can't shift 32 bits
-      int[] masks;
-      masks = new int[2];
-      masks[0] = firstBits == 0 ? 0 : (-1 << (32 - firstBits));
-      masks[1] = firstBits == 32 ? 0 : (-1 >>> firstBits);
-      return masks;
-    }
-
-    int getHash() {
-      int result = hashes[0] & masks[0];
-
-      for (int i = 1; i < pieces; i++)
-        result = result | (hashes[i] & masks[i]);
-      return result;
-    }
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ConnectionManager.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ConnectionManager.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ConnectionManager.java
deleted file mode 100644
index 699f2ae..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ConnectionManager.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.solr.common.SolrException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.Watcher.Event.KeeperState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ConnectionManager implements Watcher {
-  protected static final Logger log = LoggerFactory
-      .getLogger(ConnectionManager.class);
-
-  private final String name;
-  
-  private volatile boolean connected = false;
-
-  private final ZkClientConnectionStrategy connectionStrategy;
-
-  private final String zkServerAddress;
-
-  private final SolrZkClient client;
-
-  private final OnReconnect onReconnect;
-  private final BeforeReconnect beforeReconnect;
-
-  private volatile boolean isClosed = false;
-
-  // Track the likely expired state
-  private static class LikelyExpiredState {
-    private static LikelyExpiredState NOT_EXPIRED = new 
LikelyExpiredState(StateType.NOT_EXPIRED, 0);
-    private static LikelyExpiredState EXPIRED = new 
LikelyExpiredState(StateType.EXPIRED, 0);
-
-    public enum StateType {
-      NOT_EXPIRED,    // definitely not expired
-      EXPIRED,        // definitely expired
-      TRACKING_TIME   // not sure, tracking time of last disconnect
-    }
-
-    private StateType stateType;
-    private long lastDisconnectTime;
-    public LikelyExpiredState(StateType stateType, long lastDisconnectTime) {
-      this.stateType = stateType;
-      this.lastDisconnectTime = lastDisconnectTime;
-    }
-
-    public boolean isLikelyExpired(long timeToExpire) {
-      return stateType == StateType.EXPIRED
-        || ( stateType == StateType.TRACKING_TIME && (System.nanoTime() - 
lastDisconnectTime >  TimeUnit.NANOSECONDS.convert(timeToExpire, 
TimeUnit.MILLISECONDS)));
-    }
-  }
-
-  private volatile LikelyExpiredState likelyExpiredState = 
LikelyExpiredState.EXPIRED;
-
-  public ConnectionManager(String name, SolrZkClient client, String 
zkServerAddress, ZkClientConnectionStrategy strat, OnReconnect onConnect, 
BeforeReconnect beforeReconnect) {
-    this.name = name;
-    this.client = client;
-    this.connectionStrategy = strat;
-    this.zkServerAddress = zkServerAddress;
-    this.onReconnect = onConnect;
-    this.beforeReconnect = beforeReconnect;
-  }
-  
-  private synchronized void connected() {
-    connected = true;
-    likelyExpiredState = LikelyExpiredState.NOT_EXPIRED;
-    notifyAll();
-  }
-
-  private synchronized void disconnected() {
-    connected = false;
-    // record the time we expired unless we are already likely expired
-    if (!likelyExpiredState.isLikelyExpired(0)) {
-      likelyExpiredState = new 
LikelyExpiredState(LikelyExpiredState.StateType.TRACKING_TIME, 
System.nanoTime());
-    }
-    notifyAll();
-  }
-
-  @Override
-  public void process(WatchedEvent event) {
-    if (log.isInfoEnabled()) {
-      log.info("Watcher " + this + " name:" + name + " got event " + event
-          + " path:" + event.getPath() + " type:" + event.getType());
-    }
-    
-    if (isClosed) {
-      log.info("Client->ZooKeeper status change trigger but we are already 
closed");
-      return;
-    }
-    
-    KeeperState state = event.getState();
-    
-    if (state == KeeperState.SyncConnected) {
-      connected();
-      connectionStrategy.connected();
-    } else if (state == KeeperState.Expired) {
-      // we don't call disconnected here, because we know we are expired
-      connected = false;
-      likelyExpiredState = LikelyExpiredState.EXPIRED;
-      
-      log.info("Our previous ZooKeeper session was expired. Attempting to 
reconnect to recover relationship with ZooKeeper...");
-      
-      if (beforeReconnect != null) {
-        try {
-          beforeReconnect.command();
-        } catch (Exception e) {
-          log.warn("Exception running beforeReconnect command", e);
-        }
-      }
-      
-      try {
-        connectionStrategy.reconnect(zkServerAddress,
-            client.getZkClientTimeout(), this,
-            new ZkClientConnectionStrategy.ZkUpdate() {
-              @Override
-              public void update(SolrZooKeeper keeper) {
-                try {
-                  waitForConnected(Long.MAX_VALUE);
-                } catch (Exception e1) {
-                  closeKeeper(keeper);
-                  throw new RuntimeException(e1);
-                }
-
-                log.info("Connection with ZooKeeper reestablished.");
-                try {
-                  client.updateKeeper(keeper);
-                } catch (InterruptedException e) {
-                  closeKeeper(keeper);
-                  Thread.currentThread().interrupt();
-                  // we must have been asked to stop
-                  throw new RuntimeException(e);
-                } catch (Exception t) {
-                  closeKeeper(keeper);
-                  throw new RuntimeException(t);
-                }
-
-                if (onReconnect != null) {
-                  onReconnect.command();
-                }
-              }
-            });
-      } catch (Exception e) {
-        SolrException.log(log, "", e);
-      }
-      log.info("Connected:" + connected);
-    } else if (state == KeeperState.Disconnected) {
-      log.info("zkClient has disconnected");
-      disconnected();
-      connectionStrategy.disconnected();
-    } else if (state == KeeperState.AuthFailed) {
-      log.warn("zkClient received AuthFailed");
-    }
-  }
-
-  public synchronized boolean isConnected() {
-    return !isClosed && connected;
-  }
-  
-  // we use a volatile rather than sync
-  // to avoid possible deadlock on shutdown
-  public void close() {
-    this.isClosed = true;
-    this.likelyExpiredState = LikelyExpiredState.EXPIRED;
-  }
-  
-  public boolean isLikelyExpired() {
-    return isClosed || likelyExpiredState.isLikelyExpired((long) 
(client.getZkClientTimeout() * 0.90));
-  }
-
-  public synchronized void waitForConnected(long waitForConnection)
-      throws TimeoutException {
-    log.info("Waiting for client to connect to ZooKeeper");
-    long expire = System.nanoTime() + 
TimeUnit.NANOSECONDS.convert(waitForConnection, TimeUnit.MILLISECONDS);
-    long left = 1;
-    while (!connected && left > 0) {
-      if (isClosed) {
-        break;
-      }
-      try {
-        wait(500);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        break;
-      }
-      left = expire - System.nanoTime();
-    }
-    if (!connected) {
-      throw new TimeoutException("Could not connect to ZooKeeper " + 
zkServerAddress + " within " + waitForConnection + " ms");
-    }
-    log.info("Client is connected to ZooKeeper");
-  }
-
-  public synchronized void waitForDisconnected(long timeout)
-      throws InterruptedException, TimeoutException {
-    long expire = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, 
TimeUnit.MILLISECONDS);
-    long left = timeout;
-    while (connected && left > 0) {
-      wait(left);
-      left = expire - System.nanoTime();
-    }
-    if (connected) {
-      throw new TimeoutException("Did not disconnect");
-    }
-  }
-
-  private void closeKeeper(SolrZooKeeper keeper) {
-    try {
-      keeper.close();
-    } catch (InterruptedException e) {
-      // Restore the interrupted status
-      Thread.currentThread().interrupt();
-      log.error("", e);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-          "", e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultConnectionStrategy.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultConnectionStrategy.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultConnectionStrategy.java
deleted file mode 100644
index 80cc3b2..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultConnectionStrategy.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.io.IOException;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.solr.common.SolrException;
-import org.apache.zookeeper.Watcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * TODO: improve backoff retry impl
- */
-public class DefaultConnectionStrategy extends ZkClientConnectionStrategy {
-
-  private static Logger log = 
LoggerFactory.getLogger(DefaultConnectionStrategy.class);
-  
-  @Override
-  public void connect(String serverAddress, int timeout, Watcher watcher, 
ZkUpdate updater) throws IOException, InterruptedException, TimeoutException {
-    SolrZooKeeper zk = createSolrZooKeeper(serverAddress, timeout, watcher);
-    boolean success = false;
-    try {
-      updater.update(zk);
-      success = true;
-    } finally {
-      if (!success) {
-        zk.close();
-      }
-    }
-  }
-
-  @Override
-  public void reconnect(final String serverAddress, final int zkClientTimeout,
-      final Watcher watcher, final ZkUpdate updater) throws IOException {
-    log.info("Connection expired - starting a new one...");
-    SolrZooKeeper zk = createSolrZooKeeper(serverAddress, zkClientTimeout, 
watcher);
-    boolean success = false;
-    try {
-      updater
-          .update(zk);
-      success = true;
-      log.info("Reconnected to ZooKeeper");
-    } catch (Exception e) {
-      SolrException.log(log, "Reconnect to ZooKeeper failed", e);
-      log.info("Reconnect to ZooKeeper failed");
-    } finally {
-      if (!success) {
-        try {
-          zk.close();
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-      }
-    }
-    
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkACLProvider.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkACLProvider.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkACLProvider.java
deleted file mode 100644
index e55ccf1..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkACLProvider.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.util.List;
-
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-
-public class DefaultZkACLProvider implements ZkACLProvider {
-
-  private List<ACL> globalACLsToAdd;
-  
-  @Override
-  public List<ACL> getACLsToAdd(String zNodePath) {
-    // In default (simple) implementation use the same set of ACLs for all 
znodes
-    if (globalACLsToAdd == null) {
-      synchronized (this) {
-        if (globalACLsToAdd == null) globalACLsToAdd = createGlobalACLsToAdd();
-      }
-    }
-    return globalACLsToAdd;
-
-  }
-  
-  protected List<ACL> createGlobalACLsToAdd() {
-    return ZooDefs.Ids.OPEN_ACL_UNSAFE;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkCredentialsProvider.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkCredentialsProvider.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkCredentialsProvider.java
deleted file mode 100644
index ce9ecc3..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DefaultZkCredentialsProvider.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.util.ArrayList;
-import java.util.Collection;
-
-public class DefaultZkCredentialsProvider implements ZkCredentialsProvider {
-  
-  private Collection<ZkCredentials> zkCredentials;
-
-  @Override
-  public Collection<ZkCredentials> getCredentials() {
-    if (zkCredentials == null) {
-      synchronized (this) {
-        if (zkCredentials == null) zkCredentials = createCredentials();
-      }
-    }
-    return zkCredentials;
-  }
-  
-  protected Collection<ZkCredentials> createCredentials() {
-    return new ArrayList<ZkCredentials>();
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocCollection.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocCollection.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocCollection.java
deleted file mode 100644
index f905df5..0000000
--- a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocCollection.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.noggit.JSONUtil;
-import org.noggit.JSONWriter;
-
-/**
- * Models a Collection in zookeeper (but that Java name is obviously taken, 
hence "DocCollection")
- */
-public class DocCollection extends ZkNodeProps {
-  public static final String DOC_ROUTER = "router";
-  public static final String SHARDS = "shards";
-  public static final String STATE_FORMAT = "stateFormat";
-  private int znodeVersion = -1; // sentinel
-
-  private final String name;
-  private final Map<String, Slice> slices;
-  private final Map<String, Slice> activeSlices;
-  private final DocRouter router;
-  private final String znode;
-
-  private final Integer replicationFactor;
-  private final Integer maxShardsPerNode;
-  private final boolean autoAddReplicas;
-
-
-  public DocCollection(String name, Map<String, Slice> slices, Map<String, 
Object> props, DocRouter router) {
-    this(name, slices, props, router, -1, ZkStateReader.CLUSTER_STATE);
-  }
-
-  /**
-   * @param name  The name of the collection
-   * @param slices The logical shards of the collection.  This is used 
directly and a copy is not made.
-   * @param props  The properties of the slice.  This is used directly and a 
copy is not made.
-   */
-  public DocCollection(String name, Map<String, Slice> slices, Map<String, 
Object> props, DocRouter router, int zkVersion, String znode) {
-    super(props==null ? props = new HashMap<String,Object>() : props);
-    this.znodeVersion = zkVersion;
-    this.name = name;
-
-    this.slices = slices;
-    this.activeSlices = new HashMap<>();
-    Object replicationFactorObject = (Object) 
props.get(ZkStateReader.REPLICATION_FACTOR);
-    if (replicationFactorObject != null) {
-      this.replicationFactor = 
Integer.parseInt(replicationFactorObject.toString());
-    } else {
-      this.replicationFactor = null;
-    }
-    Object maxShardsPerNodeObject = (Object) 
props.get(ZkStateReader.MAX_SHARDS_PER_NODE);
-    if (maxShardsPerNodeObject != null) {
-      this.maxShardsPerNode = 
Integer.parseInt(maxShardsPerNodeObject.toString());
-    } else {
-      this.maxShardsPerNode = null;
-    }
-    Object autoAddReplicasObject = (Object) 
props.get(ZkStateReader.AUTO_ADD_REPLICAS);
-    if (autoAddReplicasObject != null) {
-      this.autoAddReplicas = 
Boolean.parseBoolean(autoAddReplicasObject.toString());
-    } else {
-      this.autoAddReplicas = false;
-    }
-
-    Iterator<Map.Entry<String, Slice>> iter = slices.entrySet().iterator();
-
-    while (iter.hasNext()) {
-      Map.Entry<String, Slice> slice = iter.next();
-      if (slice.getValue().getState().equals(Slice.ACTIVE))
-        this.activeSlices.put(slice.getKey(), slice.getValue());
-    }
-    this.router = router;
-    this.znode = znode == null? ZkStateReader.CLUSTER_STATE : znode;
-    assert name != null && slices != null;
-  }
-
-  /**Use this to make an exact copy of DocCollection with a new set of Slices 
and every other property as is
-   * @param slices the new set of Slices
-   * @return the resulting DocCollection
-   */
-  public DocCollection copyWithSlices(Map<String, Slice> slices){
-    return new DocCollection(getName(), slices, propMap, router, 
znodeVersion,znode);
-  }
-
-  /**
-   * Return collection name.
-   */
-  public String getName() {
-    return name;
-  }
-
-  public Slice getSlice(String sliceName) {
-    return slices.get(sliceName);
-  }
-
-  /**
-   * Gets the list of all slices for this collection.
-   */
-  public Collection<Slice> getSlices() {
-    return slices.values();
-  }
-
-
-  /**
-   * Return the list of active slices for this collection.
-   */
-  public Collection<Slice> getActiveSlices() {
-    return activeSlices.values();
-  }
-
-  /**
-   * Get the map of all slices (sliceName-&gt;Slice) for this collection.
-   */
-  public Map<String, Slice> getSlicesMap() {
-    return slices;
-  }
-
-  /**
-   * Get the map of active slices (sliceName-&gt;Slice) for this collection.
-   */
-  public Map<String, Slice> getActiveSlicesMap() {
-    return activeSlices;
-  }
-
-  public int getZNodeVersion(){
-    return znodeVersion;
-  }
-
-  public int getStateFormat() {
-    return ZkStateReader.CLUSTER_STATE.equals(znode) ? 1 : 2;
-  }
-  /**
-   * @return replication factor for this collection or null if no
-   *         replication factor exists.
-   */
-  public Integer getReplicationFactor() {
-    return replicationFactor;
-  }
-  
-  public boolean getAutoAddReplicas() {
-    return autoAddReplicas;
-  }
-  
-  public int getMaxShardsPerNode() {
-    if (maxShardsPerNode == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, 
ZkStateReader.MAX_SHARDS_PER_NODE + " is not in the cluster state.");
-    }
-    return maxShardsPerNode;
-  }
-
-  public String getZNode(){
-    return znode;
-  }
-
-
-  public DocRouter getRouter() {
-    return router;
-  }
-
-  @Override
-  public String toString() {
-    return "DocCollection("+name+")=" + JSONUtil.toJSON(this);
-  }
-
-  @Override
-  public void write(JSONWriter jsonWriter) {
-    LinkedHashMap<String, Object> all = new LinkedHashMap<>(slices.size() + 1);
-    all.putAll(propMap);
-    all.put(SHARDS, slices);
-    jsonWriter.write(all);
-  }
-
-  public Replica getReplica(String coreNodeName) {
-    for (Slice slice : slices.values()) {
-      Replica replica = slice.getReplica(coreNodeName);
-      if (replica != null) return replica;
-    }
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocRouter.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocRouter.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocRouter.java
deleted file mode 100644
index e245005..0000000
--- a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/DocRouter.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.StrUtils;
-import org.noggit.JSONWriter;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
-
-/**
- * Class to partition int range into n ranges.
- * @lucene.experimental
- */
-public abstract class DocRouter {
-  public static final String DEFAULT_NAME = CompositeIdRouter.NAME;
-  public static final DocRouter DEFAULT = new CompositeIdRouter();
-
-
-  public static DocRouter getDocRouter(String routerName) {
-    DocRouter router = routerMap.get(routerName);
-    if (router != null) return router;
-    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown 
document router '"+ routerName + "'");
-  }
-
-  protected String getRouteField(DocCollection coll){
-    if(coll == null) return null;
-    Object o = coll.get(DOC_ROUTER);
-    if (o instanceof String) {
-      return null;
-      //old format. cannot have a routefield. Ignore it
-    }
-    Map m = (Map) o;
-    if(m == null) return null;
-    return (String) m.get("field");
-
-  }
-
-  public static Map<String,Object> getRouterSpec(ZkNodeProps props){
-    Map<String,Object> map =  new LinkedHashMap<>();
-    for (String s : props.keySet()) {
-      if(s.startsWith("router.")){
-        map.put(s.substring(7), props.get(s));
-      }
-    }
-    Object o = props.get("router");
-    if (o instanceof String) {
-      map.put("name", o);
-    } else if (map.get("name") == null) {
-      map.put("name", DEFAULT_NAME);
-    }
-    return  map;
-  }
-
-  // currently just an implementation detail...
-  private final static Map<String, DocRouter> routerMap;
-  static {
-    routerMap = new HashMap<>();
-    PlainIdRouter plain = new PlainIdRouter();
-    // instead of doing back compat this way, we could always convert the 
clusterstate on first read to "plain" if it doesn't have any properties.
-    routerMap.put(null, plain);     // back compat with 4.0
-    routerMap.put(PlainIdRouter.NAME, plain);
-    routerMap.put(CompositeIdRouter.NAME, 
DEFAULT_NAME.equals(CompositeIdRouter.NAME) ? DEFAULT : new 
CompositeIdRouter());
-    routerMap.put(ImplicitDocRouter.NAME, new ImplicitDocRouter());
-    // NOTE: careful that the map keys (the static .NAME members) are filled 
in by making them final
-  }
-
-
-  // Hash ranges can't currently "wrap" - i.e. max must be greater or equal to 
min.
-  // TODO: ranges may not be all contiguous in the future (either that or we 
will
-  // need an extra class to model a collection of ranges)
-  public static class Range implements JSONWriter.Writable, Comparable<Range> {
-    public int min;  // inclusive
-    public int max;  // inclusive
-
-    public Range(int min, int max) {
-      assert min <= max;
-      this.min = min;
-      this.max = max;
-    }
-
-    public boolean includes(int hash) {
-      return hash >= min && hash <= max;
-    }
-
-    public boolean isSubsetOf(Range superset) {
-      return superset.min <= min && superset.max >= max;
-    }
-
-    public boolean overlaps(Range other) {
-      return includes(other.min) || includes(other.max) || isSubsetOf(other);
-    }
-
-    @Override
-    public String toString() {
-      return Integer.toHexString(min) + '-' + Integer.toHexString(max);
-    }
-
-
-    @Override
-    public int hashCode() {
-      // difficult numbers to hash... only the highest bits will tend to 
differ.
-      // ranges will only overlap during a split, so we can just hash the 
lower range.
-      return (min>>28) + (min>>25) + (min>>21) + min;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj.getClass() != getClass()) return false;
-      Range other = (Range)obj;
-      return this.min == other.min && this.max == other.max;
-    }
-
-    @Override
-    public void write(JSONWriter writer) {
-      writer.write(toString());
-    }
-
-    @Override
-    public int compareTo(Range that) {
-      int mincomp = Integer.valueOf(this.min).compareTo(that.min);
-      return mincomp == 0 ? Integer.valueOf(this.max).compareTo(that.max) : 
mincomp;
-    }
-  }
-
-  public Range fromString(String range) {
-    int middle = range.indexOf('-');
-    String minS = range.substring(0, middle);
-    String maxS = range.substring(middle+1);
-    long min = Long.parseLong(minS, 16);  // use long to prevent the parsing 
routines from potentially worrying about overflow
-    long max = Long.parseLong(maxS, 16);
-    return new Range((int)min, (int)max);
-  }
-
-  public Range fullRange() {
-    return new Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
-  }
-
-  /**
-   * Returns the range for each partition
-   */
-  public List<Range> partitionRange(int partitions, Range range) {
-    int min = range.min;
-    int max = range.max;
-
-    assert max >= min;
-    if (partitions == 0) return Collections.EMPTY_LIST;
-    long rangeSize = (long)max - (long)min;
-    long rangeStep = Math.max(1, rangeSize / partitions);
-
-    List<Range> ranges = new ArrayList<>(partitions);
-
-    long start = min;
-    long end = start;
-
-    while (end < max) {
-      end = start + rangeStep;
-      // make last range always end exactly on MAX_VALUE
-      if (ranges.size() == partitions - 1) {
-        end = max;
-      }
-      ranges.add(new Range((int)start, (int)end));
-      start = end + 1L;
-    }
-
-    return ranges;
-  }
-
-  /** Returns the Slice that the document should reside on, or null if there 
is not enough information */
-  public abstract Slice getTargetSlice(String id, SolrInputDocument sdoc, 
String route, SolrParams params, DocCollection collection);
-
-  /** This method is consulted to determine what slices should be queried for 
a request when
-   *  an explicit shards parameter was not used.
-   *  This method only accepts a single shard key (or null).  If you have a 
comma separated list of shard keys,
-   *  call getSearchSlices
-   **/
-  public abstract Collection<Slice> getSearchSlicesSingle(String shardKey, 
SolrParams params, DocCollection collection);
-
-  public abstract boolean isTargetSlice(String id, SolrInputDocument sdoc, 
SolrParams params, String shardId, DocCollection collection);
-
-
-  /** This method is consulted to determine what slices should be queried for 
a request when
-   *  an explicit shards parameter was not used.
-   *  This method accepts a multi-valued shardKeys parameter (normally comma 
separated from the shard.keys request parameter)
-   *  and aggregates the slices returned by getSearchSlicesSingle for each 
shardKey.
-   **/
-  public Collection<Slice> getSearchSlices(String shardKeys, SolrParams 
params, DocCollection collection) {
-    if (shardKeys == null || shardKeys.indexOf(',') < 0) {
-      return getSearchSlicesSingle(shardKeys, params, collection);
-    }
-
-    List<String> shardKeyList = StrUtils.splitSmart(shardKeys, ",", true);
-    HashSet<Slice> allSlices = new HashSet<>();
-    for (String shardKey : shardKeyList) {
-      allSlices.addAll( getSearchSlicesSingle(shardKey, params, collection) );
-    }
-    return allSlices;
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/HashBasedRouter.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/HashBasedRouter.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/HashBasedRouter.java
deleted file mode 100644
index d6e4319..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/HashBasedRouter.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.Hash;
-
-import java.util.Collection;
-import java.util.Collections;
-
-public abstract class HashBasedRouter extends DocRouter {
-
-  @Override
-  public Slice getTargetSlice(String id, SolrInputDocument sdoc, String route, 
SolrParams params, DocCollection collection) {
-    int hash;
-    if (route != null) {
-      hash = sliceHash(route, sdoc, params, collection);
-    } else {
-      if (id == null) id = getId(sdoc, params);
-      hash = sliceHash(id, sdoc, params, collection);
-    }
-    return hashToSlice(hash, collection);
-  }
-
-  @Override
-  public boolean isTargetSlice(String id, SolrInputDocument sdoc, SolrParams 
params, String shardId, DocCollection collection) {
-    if (id == null) id = getId(sdoc, params);
-    int hash = sliceHash(id, sdoc, params, collection);
-    Range range = collection.getSlice(shardId).getRange();
-    return range != null && range.includes(hash);
-  }
-
-  public int sliceHash(String id, SolrInputDocument sdoc, SolrParams params, 
DocCollection collection) {
-    return Hash.murmurhash3_x86_32(id, 0, id.length(), 0);
-  }
-
-  protected String getId(SolrInputDocument sdoc, SolrParams params) {
-    Object  idObj = sdoc.getFieldValue("id");  // blech
-    String id = idObj != null ? idObj.toString() : "null";  // should only 
happen on client side
-    return id;
-  }
-
-  protected Slice hashToSlice(int hash, DocCollection collection) {
-    for (Slice slice : collection.getActiveSlices()) {
-      Range range = slice.getRange();
-      if (range != null && range.includes(hash)) return slice;
-    }
-    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No active 
slice servicing hash code " + Integer.toHexString(hash) + " in " + collection);
-  }
-
-
-  @Override
-  public Collection<Slice> getSearchSlicesSingle(String shardKey, SolrParams 
params, DocCollection collection) {
-    if (shardKey == null) {
-      // search across whole collection
-      // TODO: this may need modification in the future when shard splitting 
could cause an overlap
-      return collection.getActiveSlices();
-    }
-
-    // use the shardKey as an id for plain hashing
-    Slice slice = getTargetSlice(shardKey, null, null, params, collection);
-    return slice == null ? Collections.<Slice>emptyList() : 
Collections.singletonList(slice);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ImplicitDocRouter.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ImplicitDocRouter.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ImplicitDocRouter.java
deleted file mode 100644
index 859a045..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ImplicitDocRouter.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.SolrParams;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import static org.apache.solr.common.params.ShardParams._ROUTE_;
-
-/** This document router is for custom sharding
- */
-public class ImplicitDocRouter extends DocRouter {
-
-  public static final String NAME = "implicit";
-
-  private static Logger log = LoggerFactory
-      .getLogger(ImplicitDocRouter.class);
-
-  @Override
-  public Slice getTargetSlice(String id, SolrInputDocument sdoc, String route, 
SolrParams params, DocCollection collection) {
-    String shard = null;
-
-    if (route != null) // if a route is already passed in, try to use it
-      shard = route;
-    else if (sdoc != null) {
-      String f = getRouteField(collection);
-      if(f !=null) {
-        Object o = sdoc.getFieldValue(f);
-        if (o != null) shard = o.toString();
-        else throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No 
value for field "+f +" in " + sdoc);
-      }
-      if(shard == null) {
-        Object o = sdoc.getFieldValue(_ROUTE_);
-        if (o != null) {
-          shard = o.toString();
-        }
-      }
-    }
-
-    if (shard == null) {
-      shard = params.get(_ROUTE_);
-    }
-
-    if (shard != null) {
-
-      Slice slice = collection.getSlice(shard);
-      if (slice == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard 
called =" + shard + " in " + collection);
-      }
-      return slice;
-    }
-
-    return null;  // no shard specified... use default.
-  }
-
-  @Override
-  public boolean isTargetSlice(String id, SolrInputDocument sdoc, SolrParams 
params, String shardId, DocCollection collection) {
-
-    // todo : how to handle this?
-    return false;
-  }
-
-  @Override
-  public Collection<Slice> getSearchSlicesSingle(String shardKey, SolrParams 
params, DocCollection collection) {
-
-    if (shardKey == null) {
-      return collection.getActiveSlices();
-    }
-
-    // assume the shardKey is just a slice name
-    Slice slice = collection.getSlice(shardKey);
-    if (slice == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "implicit 
router can't find shard " + shardKey + " in collection " + 
collection.getName());
-    }
-
-    return Collections.singleton(slice);
-  }
-
-  @Override
-  public List<Range> partitionRange(int partitions, Range range) {
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/OnReconnect.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/OnReconnect.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/OnReconnect.java
deleted file mode 100644
index 2e390a2..0000000
--- a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/OnReconnect.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.solr.common.cloud;
-
/**
 * Callback invoked after a connection has been re-established. Judging by the
 * package ({@code org.apache.solr.common.cloud}) and the name, implementations
 * are notified after a ZooKeeper reconnect — confirm against the registering
 * caller, which is outside this file.
 */
public interface OnReconnect {
  /** Work to perform once the connection is back. */
  void command();
}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/PlainIdRouter.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/PlainIdRouter.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/PlainIdRouter.java
deleted file mode 100644
index fb1210f..0000000
--- a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/PlainIdRouter.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
/**
 * A {@link HashBasedRouter} registered under the name {@code "plain"}.
 * Adds no behavior of its own: documents are routed purely by the inherited
 * murmur3 hash of the document id.
 */
public class PlainIdRouter extends HashBasedRouter {
  // Registry name under which this router is looked up.
  public static final String NAME = "plain";
}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/Replica.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/Replica.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/Replica.java
deleted file mode 100644
index bef338b..0000000
--- a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/Replica.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import org.noggit.JSONUtil;
-
-import java.util.Map;
-
-
-public class Replica extends ZkNodeProps {
-  private final String name;
-  private final String nodeName;
-
-  public Replica(String name, Map<String,Object> propMap) {
-    super(propMap);
-    this.name = name;
-    nodeName = (String)propMap.get(ZkStateReader.NODE_NAME_PROP);
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  /** The name of the node this replica resides on */
-  public String getNodeName() {
-    return nodeName;
-  }
-
-  @Override
-  public String toString() {
-    return name + ':' + JSONUtil.toJSON(propMap, -1); // small enough, keep it 
on one line (i.e. no indent)
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/RoutingRule.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/RoutingRule.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/RoutingRule.java
deleted file mode 100644
index 915c71f..0000000
--- a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/RoutingRule.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.noggit.JSONUtil;
-
-/**
- * Used for routing docs with particular keys into another collection
- */
-public class RoutingRule extends ZkNodeProps {
-  private final List<DocRouter.Range> routeRanges;
-  private final String routeRangesStr;
-  private final String targetCollectionName;
-  private final Long expireAt;
-
-  public RoutingRule(String routeKey, Map<String, Object> propMap)  {
-    super(propMap);
-    this.routeRangesStr = (String) propMap.get("routeRanges");
-    String[] rangesArr = this.routeRangesStr.split(",");
-    if (rangesArr != null && rangesArr.length > 0)  {
-      this.routeRanges = new ArrayList<>();
-      for (String r : rangesArr) {
-        routeRanges.add(DocRouter.DEFAULT.fromString(r));
-      }
-    } else  {
-      this.routeRanges = null;
-    }
-    this.targetCollectionName = (String) propMap.get("targetCollection");
-    this.expireAt = Long.parseLong((String) propMap.get("expireAt"));
-  }
-
-  public List<DocRouter.Range> getRouteRanges() {
-    return routeRanges;
-  }
-
-  public String getTargetCollectionName() {
-    return targetCollectionName;
-  }
-
-  public Long getExpireAt() {
-    return expireAt;
-  }
-
-  public String getRouteRangesStr() {
-    return routeRangesStr;
-  }
-
-  @Override
-  public String toString() {
-    return JSONUtil.toJSON(propMap, -1);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/d9a661cf/ranger_solrj/src/main/java/org/apache/solr/common/cloud/SaslZkACLProvider.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/SaslZkACLProvider.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/SaslZkACLProvider.java
deleted file mode 100644
index 54f2eee..0000000
--- 
a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/SaslZkACLProvider.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.common.cloud;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-
-/**
- * ZkACLProvider that gives all permissions for the user specified in System
- * property "solr.authorization.superuser" (default: "solr") when using sasl,
- * and gives read permissions for anyone else.  Designed for a setup where
- * configurations have already been set up and will not be modified, or
- * where configuration changes are controlled via Solr APIs.
- */
-public class SaslZkACLProvider extends DefaultZkACLProvider {
-
-  private static String superUser = 
System.getProperty("solr.authorization.superuser", "solr");
-
-  @Override
-  protected List<ACL> createGlobalACLsToAdd() {
-    List<ACL> result = new ArrayList<ACL>();
-    result.add(new ACL(ZooDefs.Perms.ALL, new Id("sasl", superUser)));
-    result.add(new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE));
-
-    if (result.isEmpty()) {
-      result = super.createGlobalACLsToAdd();
-    }
-    return result;
-  }
-}

Reply via email to