http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZkStateReader.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZkStateReader.java b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZkStateReader.java
new file mode 100644
index 0000000..1f9ebbc
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -0,0 +1,925 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.util.ByteUtils;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.Watcher.Event.EventType;
+import org.apache.zookeeper.data.Stat;
+import org.noggit.CharArr;
+import org.noggit.JSONParser;
+import org.noggit.JSONWriter;
+import org.noggit.ObjectBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+public class ZkStateReader implements Closeable {
+  private static Logger log = LoggerFactory.getLogger(ZkStateReader.class);
+  
+  public static final String BASE_URL_PROP = "base_url";
+  public static final String NODE_NAME_PROP = "node_name";
+  public static final String CORE_NODE_NAME_PROP = "core_node_name";
+  public static final String ROLES_PROP = "roles";
+  public static final String STATE_PROP = "state";
+  public static final String CORE_NAME_PROP = "core";
+  public static final String COLLECTION_PROP = "collection";
+  public static final String ELECTION_NODE_PROP = "election_node";
+  public static final String SHARD_ID_PROP = "shard";
+  public static final String REPLICA_PROP = "replica";
+  public static final String SHARD_RANGE_PROP = "shard_range";
+  public static final String SHARD_STATE_PROP = "shard_state";
+  public static final String SHARD_PARENT_PROP = "shard_parent";
+  public static final String NUM_SHARDS_PROP = "numShards";
+  public static final String LEADER_PROP = "leader";
+  public static final String PROPERTY_PROP = "property";
+  public static final String PROPERTY_VALUE_PROP = "property.value";
+  public static final String MAX_AT_ONCE_PROP = "maxAtOnce";
+  public static final String MAX_WAIT_SECONDS_PROP = "maxWaitSeconds";
+  public static final String COLLECTIONS_ZKNODE = "/collections";
+  public static final String LIVE_NODES_ZKNODE = "/live_nodes";
+  public static final String ALIASES = "/aliases.json";
+  public static final String CLUSTER_STATE = "/clusterstate.json";
+  public static final String CLUSTER_PROPS = "/clusterprops.json";
+  public static final String REJOIN_AT_HEAD_PROP = "rejoinAtHead";
+
+  public static final String REPLICATION_FACTOR = "replicationFactor";
+  public static final String MAX_SHARDS_PER_NODE = "maxShardsPerNode";
+  public static final String AUTO_ADD_REPLICAS = "autoAddReplicas";
+
+  public static final String ROLES = "/roles.json";
+
+  public static final String RECOVERING = "recovering";
+  public static final String RECOVERY_FAILED = "recovery_failed";
+  public static final String ACTIVE = "active";
+  public static final String DOWN = "down";
+  public static final String SYNC = "sync";
+
+  public static final String CONFIGS_ZKNODE = "/configs";
+  public final static String CONFIGNAME_PROP="configName";
+
+  public static final String LEGACY_CLOUD = "legacyCloud";
+
+  public static final String URL_SCHEME = "urlScheme";
+  
+  protected volatile ClusterState clusterState;
+
+  private static final long SOLRCLOUD_UPDATE_DELAY = Long.parseLong(System.getProperty("solrcloud.update.delay", "5000"));
+
+  public static final String LEADER_ELECT_ZKNODE = "leader_elect";
+
+  public static final String SHARD_LEADERS_ZKNODE = "leaders";
+  public static final String ELECTION_NODE = "election";
+
+  private final Set<String> watchedCollections = new HashSet<String>();
+
+  /** These are collections which are actively watched by this instance.
+   *
+   */
+  private Map<String , DocCollection> watchedCollectionStates = new ConcurrentHashMap<String, DocCollection>();
+
+  private final ZkConfigManager configManager;
+
+
+  //
+  // convenience methods... should these go somewhere else?
+  //
+  public static byte[] toJSON(Object o) {
+    CharArr out = new CharArr();
+    new JSONWriter(out, 2).write(o); // indentation by default
+    return toUTF8(out);
+  }
+
+  public static byte[] toUTF8(CharArr out) {
+    byte[] arr = new byte[out.size() << 2]; // is 4x the real worst-case upper-bound?
+    int nBytes = ByteUtils.UTF16toUTF8(out, 0, out.size(), arr, 0);
+    return Arrays.copyOf(arr, nBytes);
+  }
+
+  public static Object fromJSON(byte[] utf8) {
+    // convert directly from bytes to chars
+    // and parse directly from that instead of going through
+    // intermediate strings or readers
+    CharArr chars = new CharArr();
+    ByteUtils.UTF8toUTF16(utf8, 0, utf8.length, chars);
+    JSONParser parser = new JSONParser(chars.getArray(), chars.getStart(), chars.length());
+    try {
+      return ObjectBuilder.getVal(parser);
+    } catch (IOException e) {
+      throw new RuntimeException(e); // should never happen w/o using real IO
+    }
+  }
+  
+  /**
+   * Returns config set name for collection.
+   *
+   * @param collection to return config set name for
+   */
+  public String readConfigName(String collection) {
+
+    String configName = null;
+
+    String path = COLLECTIONS_ZKNODE + "/" + collection;
+    if (log.isInfoEnabled()) {
+      log.info("Load collection config from:" + path);
+    }
+
+    try {
+      byte[] data = zkClient.getData(path, null, null, true);
+
+      if(data != null) {
+        ZkNodeProps props = ZkNodeProps.load(data);
+        configName = props.getStr(CONFIGNAME_PROP);
+      }
+
+      if (configName != null) {
+        if (!zkClient.exists(CONFIGS_ZKNODE + "/" + configName, true)) {
+          log.error("Specified config does not exist in ZooKeeper:" + 
configName);
+          throw new ZooKeeperException(ErrorCode.SERVER_ERROR,
+              "Specified config does not exist in ZooKeeper:" + configName);
+        } else if (log.isInfoEnabled()) {
+          log.info("path={} {}={} specified config exists in ZooKeeper",
+              new Object[] {path, CONFIGNAME_PROP, configName});
+        }
+      } else {
+        throw new ZooKeeperException(ErrorCode.INVALID_STATE, "No config data found at path: " + path);
+      }
+    }
+    catch (KeeperException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading config 
name for collection " + collection, e);
+    }
+    catch (InterruptedException e) {
+      Thread.interrupted();
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading config 
name for collection " + collection, e);
+    }
+
+    return configName;
+  }
+
+
+  private static class ZKTF implements ThreadFactory {
+    private static ThreadGroup tg = new ThreadGroup("ZkStateReader");
+    @Override
+    public Thread newThread(Runnable r) {
+      Thread td = new Thread(tg, r);
+      td.setDaemon(true);
+      return td;
+    }
+  }
+  private ScheduledExecutorService updateCloudExecutor = Executors.newScheduledThreadPool(1, new ZKTF());
+
+  private boolean clusterStateUpdateScheduled;
+
+  private final SolrZkClient zkClient;
+  
+  private final boolean closeClient;
+
+  private final ZkCmdExecutor cmdExecutor;
+
+  private volatile Aliases aliases = new Aliases();
+
+  private volatile boolean closed = false;
+
+  public ZkStateReader(SolrZkClient zkClient) {
+    this.zkClient = zkClient;
+    this.cmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
+    this.configManager = new ZkConfigManager(zkClient);
+    this.closeClient = false;
+  }
+
+  public ZkStateReader(String zkServerAddress, int zkClientTimeout, int zkClientConnectTimeout) {
+    this.zkClient = new SolrZkClient(zkServerAddress, zkClientTimeout, zkClientConnectTimeout,
+        // on reconnect, reload cloud info
+        new OnReconnect() {
+          @Override
+          public void command() {
+            try {
+              ZkStateReader.this.createClusterStateWatchersAndUpdate();
+            } catch (KeeperException e) {
+              log.error("", e);
+              throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
+                  "", e);
+            } catch (InterruptedException e) {
+              // Restore the interrupted status
+              Thread.currentThread().interrupt();
+              log.error("", e);
+              throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
+                  "", e);
+            }
+          }
+        });
+    this.cmdExecutor = new ZkCmdExecutor(zkClientTimeout);
+    this.configManager = new ZkConfigManager(zkClient);
+    this.closeClient = true;
+  }
+
+  public ZkConfigManager getConfigManager() {
+    return configManager;
+  }
+  
+  // load and publish a new CollectionInfo
+  public void updateClusterState(boolean immediate) throws KeeperException, InterruptedException {
+    updateClusterState(immediate, false);
+  }
+  
+  // load and publish a new CollectionInfo
+  public void updateLiveNodes() throws KeeperException, InterruptedException {
+    updateClusterState(true, true);
+  }
+  
+  public Aliases getAliases() {
+    return aliases;
+  }
+
+  public Integer compareStateVersions(String coll, int version) {
+    DocCollection collection = clusterState.getCollectionOrNull(coll);
+    if (collection == null) return null;
+    if (collection.getZNodeVersion() < version) {
+      log.debug("server older than client {}<{}", 
collection.getZNodeVersion(), version);
+      DocCollection nu = getCollectionLive(this, coll);
+      if (nu == null) return -1 ;
+      if (nu.getZNodeVersion() > collection.getZNodeVersion()) {
+        updateWatchedCollection(nu);
+        collection = nu;
+      }
+    }
+    
+    if (collection.getZNodeVersion() == version) {
+      return null;
+    }
+    
+    log.debug("wrong version from client {}!={} ", version, 
collection.getZNodeVersion());
+    
+    return collection.getZNodeVersion();
+  }
+  
+  public synchronized void createClusterStateWatchersAndUpdate() throws KeeperException,
+      InterruptedException {
+    // We need to fetch the current cluster state and the set of live nodes
+    
+    synchronized (getUpdateLock()) {
+      cmdExecutor.ensureExists(CLUSTER_STATE, zkClient);
+      cmdExecutor.ensureExists(ALIASES, zkClient);
+      
+      log.info("Updating cluster state from ZooKeeper... ");
+      
+      zkClient.exists(CLUSTER_STATE, new Watcher() {
+        
+        @Override
+        public void process(WatchedEvent event) {
+          // session events are not change events,
+          // and do not remove the watcher
+          if (EventType.None.equals(event.getType())) {
+            return;
+          }
+          log.info("A cluster state change: {}, has occurred - updating... 
(live nodes size: {})", (event) , ZkStateReader.this.clusterState == null ? 0 : 
ZkStateReader.this.clusterState.getLiveNodes().size());
+          try {
+            
+            // delayed approach
+            // ZkStateReader.this.updateClusterState(false, false);
+            synchronized (ZkStateReader.this.getUpdateLock()) {
+              // remake watch
+              final Watcher thisWatch = this;
+              Set<String> ln = ZkStateReader.this.clusterState.getLiveNodes();
+              // update volatile
+              ZkStateReader.this.clusterState = constructState(ln, thisWatch);
+            }
+          } catch (KeeperException e) {
+            if (e.code() == KeeperException.Code.SESSIONEXPIRED
+                || e.code() == KeeperException.Code.CONNECTIONLOSS) {
+              log.warn("ZooKeeper watch triggered, but Solr cannot talk to 
ZK");
+              return;
+            }
+            log.error("", e);
+            throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
+                "", e);
+          } catch (InterruptedException e) {
+            // Restore the interrupted status
+            Thread.currentThread().interrupt();
+            log.warn("", e);
+            return;
+          }
+        }
+        
+      }, true);
+    }
+   
+    
+    synchronized (ZkStateReader.this.getUpdateLock()) {
+      List<String> liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE,
+          new Watcher() {
+            
+            @Override
+            public void process(WatchedEvent event) {
+              // session events are not change events,
+              // and do not remove the watcher
+              if (EventType.None.equals(event.getType())) {
+                return;
+              }
+              try {
+                // delayed approach
+                // ZkStateReader.this.updateClusterState(false, true);
+                synchronized (ZkStateReader.this.getUpdateLock()) {
+                  List<String> liveNodes = zkClient.getChildren(
+                      LIVE_NODES_ZKNODE, this, true);
+                  log.debug("Updating live nodes... ({})", liveNodes.size());
+                  Set<String> liveNodesSet = new HashSet<>();
+                  liveNodesSet.addAll(liveNodes);
+
+                  ClusterState clusterState =  ZkStateReader.this.clusterState;
+
+                  clusterState.setLiveNodes(liveNodesSet);
+                }
+              } catch (KeeperException e) {
+                if (e.code() == KeeperException.Code.SESSIONEXPIRED
+                    || e.code() == KeeperException.Code.CONNECTIONLOSS) {
+                  log.warn("ZooKeeper watch triggered, but Solr cannot talk to 
ZK");
+                  return;
+                }
+                log.error("", e);
+                throw new ZooKeeperException(
+                    SolrException.ErrorCode.SERVER_ERROR, "", e);
+              } catch (InterruptedException e) {
+                // Restore the interrupted status
+                Thread.currentThread().interrupt();
+                log.warn("", e);
+                return;
+              }
+            }
+            
+          }, true);
+    
+      Set<String> liveNodeSet = new HashSet<>();
+      liveNodeSet.addAll(liveNodes);
+      this.clusterState = constructState(liveNodeSet, null);
+
+      zkClient.exists(ALIASES,
+          new Watcher() {
+            
+            @Override
+            public void process(WatchedEvent event) {
+              // session events are not change events,
+              // and do not remove the watcher
+              if (EventType.None.equals(event.getType())) {
+                return;
+              }
+              try {
+                synchronized (ZkStateReader.this.getUpdateLock()) {
+                  log.info("Updating aliases... ");
+
+                  // remake watch
+                  final Watcher thisWatch = this;
+                  Stat stat = new Stat();
+                  byte[] data = zkClient.getData(ALIASES, thisWatch, stat ,
+                      true);
+
+                  Aliases aliases = ClusterState.load(data);
+
+                  ZkStateReader.this.aliases = aliases;
+                }
+              } catch (KeeperException e) {
+                if (e.code() == KeeperException.Code.SESSIONEXPIRED
+                    || e.code() == KeeperException.Code.CONNECTIONLOSS) {
+                  log.warn("ZooKeeper watch triggered, but Solr cannot talk to 
ZK");
+                  return;
+                }
+                log.error("", e);
+                throw new ZooKeeperException(
+                    SolrException.ErrorCode.SERVER_ERROR, "", e);
+              } catch (InterruptedException e) {
+                // Restore the interrupted status
+                Thread.currentThread().interrupt();
+                log.warn("", e);
+                return;
+              }
+            }
+            
+          }, true);
+    }
+    updateAliases();
+    //on reconnect of SolrZkClient re-add watchers for the watched external collections
+    synchronized (this) {
+      for (String watchedCollection : watchedCollections) {
+        addZkWatch(watchedCollection);
+      }
+    }
+  }
+
+  private ClusterState constructState(Set<String> ln, Watcher watcher)
+      throws KeeperException, InterruptedException {
+    Stat stat = new Stat();
+    byte[] data = zkClient.getData(CLUSTER_STATE, watcher, stat, true);
+    ClusterState loadedData = ClusterState.load(stat.getVersion(), data, ln,
+        CLUSTER_STATE);
+    Map<String,ClusterState.CollectionRef> result = new LinkedHashMap<>();
+    result.putAll(loadedData.getCollectionStates());// first load all
+                                                    // collections in
+                                                    // clusterstate.json
+    for (String s : getIndividualColls()) {
+      synchronized (this) {
+        if (watchedCollections.contains(s)) {
+          DocCollection live = getCollectionLive(this, s);
+          if (live != null) {
+            watchedCollectionStates.put(s, live);
+            // if it is a watched collection, add too
+            result.put(s, new ClusterState.CollectionRef(live));
+          }
+        } else {
+          // if it is not collection, then just create a reference which can fetch
+          // the collection object just in time from ZK
+          // this is also cheap (lazy loaded) so we put it inside the synchronized
+          // block although it is not required
+          final String collName = s;
+          result.put(s, new ClusterState.CollectionRef(null) {
+            @Override
+            public DocCollection get() {
+              return getCollectionLive(ZkStateReader.this, collName);
+            }
+
+            @Override
+            public boolean isLazilyLoaded() { return true; }
+          });
+        }
+      }
+    }
+    return new ClusterState(ln, result, stat.getVersion());
+  }
+
+
+  private Set<String> getIndividualColls() throws KeeperException, InterruptedException {
+    List<String> children = null;
+    try {
+      children = zkClient.getChildren(COLLECTIONS_ZKNODE, null, true);
+    } catch (KeeperException.NoNodeException e) {
+      log.warn("Error fetching collection names");
+      
+      return new HashSet<>();
+    }
+    if (children == null || children.isEmpty()) return new HashSet<>();
+    HashSet<String> result = new HashSet<>(children.size());
+    
+    for (String c : children) {
+      try {
+        if (zkClient.exists(getCollectionPath(c), true)) {
+          result.add(c);
+        }
+      } catch (Exception e) {
+        log.warn("Error reading collections nodes", e);
+      }
+    }
+    return result;
+  }
+
+  // load and publish a new CollectionInfo
+  private synchronized void updateClusterState(boolean immediate,
+      final boolean onlyLiveNodes) throws KeeperException,
+      InterruptedException {
+    // build immutable CloudInfo
+    
+    if (immediate) {
+      ClusterState clusterState;
+      synchronized (getUpdateLock()) {
+        List<String> liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE, null,
+            true);
+        Set<String> liveNodesSet = new HashSet<>();
+        liveNodesSet.addAll(liveNodes);
+        
+        if (!onlyLiveNodes) {
+          log.debug("Updating cloud state from ZooKeeper... ");
+          
+          clusterState = constructState(liveNodesSet,null);
+        } else {
+          log.debug("Updating live nodes from ZooKeeper... ({})", 
liveNodesSet.size());
+          clusterState = this.clusterState;
+          clusterState.setLiveNodes(liveNodesSet);
+        }
+        this.clusterState = clusterState;
+      }
+      synchronized (ZkStateReader.this) {
+        for (String watchedCollection : watchedCollections) {
+          DocCollection live = getCollectionLive(this, watchedCollection);
+          if (live != null) {
+            updateWatchedCollection(live);
+          }
+        }
+      }
+
+    } else {
+      if (clusterStateUpdateScheduled) {
+        log.debug("Cloud state update for ZooKeeper already scheduled");
+        return;
+      }
+      log.debug("Scheduling cloud state update from ZooKeeper...");
+      clusterStateUpdateScheduled = true;
+      updateCloudExecutor.schedule(new Runnable() {
+        
+        @Override
+        public void run() {
+          log.debug("Updating cluster state from ZooKeeper...");
+          synchronized (getUpdateLock()) {
+            clusterStateUpdateScheduled = false;
+            ClusterState clusterState;
+            try {
+              List<String> liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE,
+                  null, true);
+              Set<String> liveNodesSet = new HashSet<>();
+              liveNodesSet.addAll(liveNodes);
+              
+              if (!onlyLiveNodes) {
+                log.debug("Updating cloud state from ZooKeeper... ");
+
+                clusterState = constructState(liveNodesSet,null);
+              } else {
+                log.debug("Updating live nodes from ZooKeeper... ");
+                clusterState = ZkStateReader.this.clusterState;
+                clusterState.setLiveNodes(liveNodesSet);
+              }
+              
+              ZkStateReader.this.clusterState = clusterState;
+              
+            } catch (KeeperException e) {
+              if (e.code() == KeeperException.Code.SESSIONEXPIRED
+                  || e.code() == KeeperException.Code.CONNECTIONLOSS) {
+                log.warn("ZooKeeper watch triggered, but Solr cannot talk to 
ZK");
+                return;
+              }
+              log.error("", e);
+              throw new ZooKeeperException(
+                  SolrException.ErrorCode.SERVER_ERROR, "", e);
+            } catch (InterruptedException e) {
+              // Restore the interrupted status
+              Thread.currentThread().interrupt();
+              log.error("", e);
+              throw new ZooKeeperException(
+                  SolrException.ErrorCode.SERVER_ERROR, "", e);
+            } 
+            // update volatile
+            ZkStateReader.this.clusterState = clusterState;
+
+            synchronized (ZkStateReader.this) {
+              for (String watchedCollection : watchedCollections) {
+                DocCollection live = getCollectionLive(ZkStateReader.this, watchedCollection);
+                assert live != null;
+                if (live != null) {
+                  updateWatchedCollection(live);
+                }
+              }
+            }
+          }
+        }
+      }, SOLRCLOUD_UPDATE_DELAY, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  /**
+   * @return information about the cluster from ZooKeeper
+   */
+  public ClusterState getClusterState() {
+    return clusterState;
+  }
+  
+  public Object getUpdateLock() {
+    return this;
+  }
+
+  public void close() {
+    this.closed  = true;
+    if (closeClient) {
+      zkClient.close();
+    }
+  }
+  
+  abstract class RunnableWatcher implements Runnable {
+    Watcher watcher;
+    public RunnableWatcher(Watcher watcher){
+      this.watcher = watcher;
+    }
+
+  }
+  
+  public String getLeaderUrl(String collection, String shard, int timeout)
+      throws InterruptedException, KeeperException {
+    ZkCoreNodeProps props = new ZkCoreNodeProps(getLeaderRetry(collection,
+        shard, timeout));
+    return props.getCoreUrl();
+  }
+  
+  /**
+   * Get shard leader properties, with retry if none exist.
+   */
+  public Replica getLeaderRetry(String collection, String shard) throws InterruptedException {
+    return getLeaderRetry(collection, shard, 4000);
+  }
+
+  /**
+   * Get shard leader properties, with retry if none exist.
+   */
+  public Replica getLeaderRetry(String collection, String shard, int timeout) throws InterruptedException {
+    long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS);
+    while (System.nanoTime() < timeoutAt && !closed) {
+      if (clusterState != null) {    
+        Replica replica = clusterState.getLeader(collection, shard);
+        if (replica != null && getClusterState().liveNodesContain(replica.getNodeName())) {
+          return replica;
+        }
+      }
+      Thread.sleep(50);
+    }
+    throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "No registered leader was found after waiting for "
+        + timeout + "ms " + ", collection: " + collection + " slice: " + shard);
+  }
+
+  /**
+   * Get path where shard leader properties live in zookeeper.
+   */
+  public static String getShardLeadersPath(String collection, String shardId) {
+    return COLLECTIONS_ZKNODE + "/" + collection + "/"
+        + SHARD_LEADERS_ZKNODE + (shardId != null ? ("/" + shardId)
+        : "");
+  }
+
+  /**
+   * Get path where shard leader elections ephemeral nodes are.
+   */
+  public static String getShardLeadersElectPath(String collection, String shardId) {
+    return COLLECTIONS_ZKNODE + "/" + collection + "/"
+        + LEADER_ELECT_ZKNODE  + (shardId != null ? ("/" + shardId + "/" + ELECTION_NODE)
+        : "");
+  }
+
+
+  public List<ZkCoreNodeProps> getReplicaProps(String collection,
+      String shardId, String thisCoreNodeName) {
+    return getReplicaProps(collection, shardId, thisCoreNodeName, null);
+  }
+  
+  public List<ZkCoreNodeProps> getReplicaProps(String collection,
+      String shardId, String thisCoreNodeName, String mustMatchStateFilter) {
+    return getReplicaProps(collection, shardId, thisCoreNodeName, mustMatchStateFilter, null);
+  }
+  
+  public List<ZkCoreNodeProps> getReplicaProps(String collection,
+      String shardId, String thisCoreNodeName, String mustMatchStateFilter, String mustNotMatchStateFilter) {
+    assert thisCoreNodeName != null;
+    ClusterState clusterState = this.clusterState;
+    if (clusterState == null) {
+      return null;
+    }
+    Map<String,Slice> slices = clusterState.getSlicesMap(collection);
+    if (slices == null) {
+      throw new ZooKeeperException(ErrorCode.BAD_REQUEST,
+          "Could not find collection in zk: " + collection + " "
+              + clusterState.getCollections());
+    }
+    
+    Slice replicas = slices.get(shardId);
+    if (replicas == null) {
+      throw new ZooKeeperException(ErrorCode.BAD_REQUEST, "Could not find 
shardId in zk: " + shardId);
+    }
+    
+    Map<String,Replica> shardMap = replicas.getReplicasMap();
+    List<ZkCoreNodeProps> nodes = new ArrayList<>(shardMap.size());
+    for (Entry<String,Replica> entry : shardMap.entrySet()) {
+      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
+      
+      String coreNodeName = entry.getValue().getName();
+      
+      if (clusterState.liveNodesContain(nodeProps.getNodeName()) && !coreNodeName.equals(thisCoreNodeName)) {
+        if (mustMatchStateFilter == null || mustMatchStateFilter.equals(nodeProps.getState())) {
+          if (mustNotMatchStateFilter == null || !mustNotMatchStateFilter.equals(nodeProps.getState())) {
+            nodes.add(nodeProps);
+          }
+        }
+      }
+    }
+    if (nodes.size() == 0) {
+      // no replicas
+      return null;
+    }
+
+    return nodes;
+  }
+
+  public SolrZkClient getZkClient() {
+    return zkClient;
+  }
+
+  public void updateAliases() throws KeeperException, InterruptedException {
+    byte[] data = zkClient.getData(ALIASES, null, null, true);
+
+    Aliases aliases = ClusterState.load(data);
+
+    ZkStateReader.this.aliases = aliases;
+  }
+  public Map getClusterProps(){
+    Map result = null;
+    try {
+      if(getZkClient().exists(ZkStateReader.CLUSTER_PROPS, true)){
+        result = (Map) ZkStateReader.fromJSON(getZkClient().getData(ZkStateReader.CLUSTER_PROPS, null, new Stat(), true)) ;
+      } else {
+        result= new LinkedHashMap();
+      }
+      return result;
+    } catch (Exception e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR,"Error reading cluster 
properties",e) ;
+    }
+  }
+  
+  /**
+   * Returns the baseURL corresponding to a given node's nodeName --
+   * NOTE: does not (currently) imply that the nodeName (or resulting 
+   * baseURL) exists in the cluster.
+   * @lucene.experimental
+   */
+  public String getBaseUrlForNodeName(final String nodeName) {
+    final int _offset = nodeName.indexOf("_");
+    if (_offset < 0) {
+      throw new IllegalArgumentException("nodeName does not contain expected 
'_' seperator: " + nodeName);
+    }
+    final String hostAndPort = nodeName.substring(0,_offset);
+    try {
+      final String path = URLDecoder.decode(nodeName.substring(1+_offset), "UTF-8");
+      String urlScheme = (String) getClusterProps().get(URL_SCHEME);
+      if(urlScheme == null) {
+        urlScheme = "http";
+      }
+      return urlScheme + "://" + hostAndPort + (path.isEmpty() ? "" : ("/" + path));
+    } catch (UnsupportedEncodingException e) {
+      throw new IllegalStateException("JVM Does not seem to support UTF-8", e);
+    }
+  }
+
+  public static DocCollection getCollectionLive(ZkStateReader zkStateReader,
+      String coll) {
+    String collectionPath = getCollectionPath(coll);
+    try {
+      if (!zkStateReader.getZkClient().exists(collectionPath, true)) return null;
+      Stat stat = new Stat();
+      byte[] data = zkStateReader.getZkClient().getData(collectionPath, null, stat, true);
+      ClusterState state = ClusterState.load(stat.getVersion(), data,
+          Collections.<String> emptySet(), collectionPath);
+      ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
+      return collectionRef == null ? null : collectionRef.get();
+    } catch (KeeperException.NoNodeException e) {
+      log.warn("No node available : " + collectionPath, e);
+      return null;
+    } catch (KeeperException e) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "Could not load collection from ZK:" + coll, e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "Could not load collection from ZK:" + coll, e);
+    }
+  }
+
+  public static String getCollectionPath(String coll) {
+    return COLLECTIONS_ZKNODE+"/"+coll + "/state.json";
+  }
+
+  public void addCollectionWatch(String coll) throws KeeperException, InterruptedException {
+    synchronized (this) {
+      if (watchedCollections.contains(coll)) return;
+      else {
+        watchedCollections.add(coll);
+      }
+      addZkWatch(coll);
+    }
+  }
+
+  private void addZkWatch(final String coll) throws KeeperException,
+      InterruptedException {
+    log.info("addZkWatch {}", coll);
+    final String fullpath = getCollectionPath(coll);
+    synchronized (getUpdateLock()) {
+      
+      cmdExecutor.ensureExists(fullpath, zkClient);
+      log.info("Updating collection state at {} from ZooKeeper... ", fullpath);
+      
+      Watcher watcher = new Watcher() {
+        
+        @Override
+        public void process(WatchedEvent event) {
+          // session events are not change events,
+          // and do not remove the watcher
+          if (EventType.None.equals(event.getType())) {
+            return;
+          }
+          log.info("A cluster state change: {} for collection {} has occurred 
- updating... (live nodes size: {})",
+              (event), coll, ZkStateReader.this.clusterState == null ? 0
+                  : ZkStateReader.this.clusterState.getLiveNodes().size());
+          try {
+            
+            // delayed approach
+            // ZkStateReader.this.updateClusterState(false, false);
+            synchronized (ZkStateReader.this.getUpdateLock()) {
+              if (!watchedCollections.contains(coll)) {
+                log.info("Unwatched collection {}", coll);
+                return;
+              }
+              // remake watch
+              final Watcher thisWatch = this;
+              Stat stat = new Stat();
+              byte[] data = zkClient.getData(fullpath, thisWatch, stat, true);
+              
+              if (data == null || data.length == 0) {
+                log.warn("No value set for collection state : {}", coll);
+                return;
+                
+              }
+              ClusterState clusterState = ClusterState.load(stat.getVersion(),
+                  data, Collections.<String> emptySet(), fullpath);
+              // update volatile
+              
+              DocCollection newState = clusterState.getCollectionStates()
+                  .get(coll).get();
+              updateWatchedCollection(newState);
+              
+            }
+          } catch (KeeperException e) {
+            if (e.code() == KeeperException.Code.SESSIONEXPIRED
+                || e.code() == KeeperException.Code.CONNECTIONLOSS) {
+              log.warn("ZooKeeper watch triggered, but Solr cannot talk to 
ZK");
+              return;
+            }
+            log.error("Unwatched collection :" + coll, e);
+            throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "", e);
+            
+          } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            log.error("Unwatched collection :" + coll, e);
+            return;
+          }
+        }
+        
+      };
+      zkClient.exists(fullpath, watcher, true);
+    }
+    DocCollection collection = getCollectionLive(this, coll);
+    if (collection != null) {
+      updateWatchedCollection(collection);
+    }
+  }
+  
+  private void updateWatchedCollection(DocCollection newState) {
+    watchedCollectionStates.put(newState.getName(), newState);
+    log.info("Updating data for {} to ver {} ", newState.getName(),
+        newState.getZNodeVersion());
+    
+    this.clusterState = clusterState.copyWith(newState.getName(), newState);
+  }
+  
+  /** This is not a public API. Only used by ZkController */
+  public void removeZKWatch(final String coll) {
+    synchronized (this) {
+      watchedCollections.remove(coll);
+      watchedCollectionStates.remove(coll);
+      try {
+        updateClusterState(true);
+      } catch (KeeperException e) {
+        log.error("Error updating state",e);
+      } catch (InterruptedException e) {
+        log.error("Error updating state",e);
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+}
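
For reference, a minimal usage sketch of the ZkStateReader added above. The ZooKeeper address and timeout values are illustrative assumptions, not taken from this patch:

    // Sketch: read the cluster state once and print the live nodes.
    // Assumes a reachable ZooKeeper ensemble at localhost:2181 (illustrative).
    void printLiveNodes() throws Exception {
      ZkStateReader reader = new ZkStateReader("localhost:2181", 10000, 10000);
      try {
        reader.createClusterStateWatchersAndUpdate(); // sets watches, loads clusterstate.json
        ClusterState state = reader.getClusterState();
        System.out.println("live nodes: " + state.getLiveNodes());
      } finally {
        reader.close(); // closes the SolrZkClient this constructor created
      }
    }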

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZooKeeperException.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZooKeeperException.java b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZooKeeperException.java
new file mode 100644
index 0000000..c712dea
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/ZooKeeperException.java
@@ -0,0 +1,33 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.solr.common.SolrException;
+
+public class ZooKeeperException extends SolrException {
+
+  public ZooKeeperException(ErrorCode code, String msg, Throwable th) {
+    super(code, msg, th);
+  }
+  
+  public ZooKeeperException(ErrorCode code, String msg) {
+    super(code, msg);
+  }
+
+}
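
ZooKeeperException is simply a SolrException carrying an ErrorCode. A hedged sketch of how the reader code above wraps checked ZooKeeper exceptions with it (the helper method name is hypothetical):

    // Illustrative helper: translate checked ZooKeeper exceptions into ZooKeeperException.
    boolean clusterStateNodeExists(SolrZkClient zkClient) {
      try {
        return zkClient.exists(ZkStateReader.CLUSTER_STATE, true);
      } catch (KeeperException e) {
        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "ZK lookup failed", e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag, as the reader code does
        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "interrupted", e);
      }
    }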

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/cloud/package-info.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/cloud/package-info.java b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/package-info.java
new file mode 100644
index 0000000..04ef67c
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/cloud/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+/** 
+ * Common Solr Cloud and ZooKeeper related classes reused on both clients &amp; server.
+ */
+package org.apache.solr.common.cloud;
+
+

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/luke/FieldFlag.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/luke/FieldFlag.java b/ranger_solrj/src/main/java/org/apache/solr/common/luke/FieldFlag.java
new file mode 100644
index 0000000..720c0b6
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/luke/FieldFlag.java
@@ -0,0 +1,70 @@
+package org.apache.solr.common.luke;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ *
+ * @since solr 1.3
+ */
+public enum FieldFlag {
+  INDEXED('I', "Indexed"), 
+  TOKENIZED('T', "Tokenized"), 
+  STORED('S', "Stored"), 
+  DOC_VALUES('D', "DocValues"),
+  MULTI_VALUED('M', "Multivalued"),
+  TERM_VECTOR_STORED('V', "TermVector Stored"), 
+  TERM_VECTOR_OFFSET('o', "Store Offset With TermVector"),
+  TERM_VECTOR_POSITION('p', "Store Position With TermVector"),
+  OMIT_NORMS('O', "Omit Norms"), 
+  OMIT_TF('F', "Omit Term Frequencies & Positions"), 
+  OMIT_POSITIONS('P', "Omit Positions"),
+  STORE_OFFSETS_WITH_POSITIONS('H', "Store Offsets with Positions"),
+  LAZY('L', "Lazy"), 
+  BINARY('B', "Binary"), 
+  SORT_MISSING_FIRST('f', "Sort Missing First"), 
+  SORT_MISSING_LAST('l', "Sort Missing Last");
+
+  private final char abbreviation;
+  private final String display;
+
+  FieldFlag(char abbreviation, String display) {
+    this.abbreviation = abbreviation;
+    this.display = display;
+    this.display.intern();//QUESTION:  Need we bother here?
+  }
+
+  public static FieldFlag getFlag(char abbrev){
+    FieldFlag result = null;
+    FieldFlag [] vals = FieldFlag.values();
+    for (int i = 0; i < vals.length; i++) {
+      if (vals[i].getAbbreviation() == abbrev){
+        result = vals[i];
+        break;
+      }
+    }
+    return result;
+  }
+
+  public char getAbbreviation() {
+    return abbreviation;
+  }
+
+  public String getDisplay() {
+    return display;
+  }
+}
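
A small sketch of decoding a Luke-style flag string with the enum above (the input string is illustrative):

    // Map each abbreviation character back to its FieldFlag; unknown characters yield null.
    String flags = "ITS"; // e.g. Indexed, Tokenized, Stored
    for (char c : flags.toCharArray()) {
      FieldFlag flag = FieldFlag.getFlag(c);
      if (flag != null) {
        System.out.println(flag.getAbbreviation() + " -> " + flag.getDisplay());
      }
    }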

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/luke/package-info.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/luke/package-info.java b/ranger_solrj/src/main/java/org/apache/solr/common/luke/package-info.java
new file mode 100644
index 0000000..e710d39
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/luke/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+/** 
+ * Common constants used by the <code>LukeRequestHandler</code>.
+ */
+package org.apache.solr.common.luke;
+
+

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/package-info.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/package-info.java b/ranger_solrj/src/main/java/org/apache/solr/common/package-info.java
new file mode 100644
index 0000000..690a83c
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+/** 
+ * Common classes reused on both clients &amp; server for dealing with {@link org.apache.solr.common.SolrInputDocument documents to be indexed} and {@link org.apache.solr.common.SolrDocumentList result documents}.
+ */
+package org.apache.solr.common;
+
+

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/AnalysisParams.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/params/AnalysisParams.java b/ranger_solrj/src/main/java/org/apache/solr/common/params/AnalysisParams.java
new file mode 100644
index 0000000..36c276f
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/params/AnalysisParams.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+/**
+ * Defines the request parameters used by all analysis request handlers.
+ *
+ *
+ * @since solr 1.4
+ */
+public interface AnalysisParams {
+
+  /**
+   * The prefix for all parameters.
+   */
+  static final String PREFIX = "analysis";
+
+  /**
+   * Holds the query to be analyzed.
+   */
+  static final String QUERY = PREFIX + ".query";
+
+  /**
+   * Set to {@code true} to indicate that the index tokens that match query tokens should be marked as "matched".
+   */
+  static final String SHOW_MATCH = PREFIX + ".showmatch";
+
+
+  //===================================== FieldAnalysisRequestHandler Params =========================================
+
+  /**
+   * Holds the value of the field which should be analyzed.
+   */
+  static final String FIELD_NAME = PREFIX + ".fieldname";
+
+  /**
+   * Holds a comma-separated list of field types that the analysis should be performed for.
+   */
+  static final String FIELD_TYPE = PREFIX + ".fieldtype";
+
+  /**
+   * Holds a comma-separated list of field names that the analysis should be performed for.
+   */
+  static final String FIELD_VALUE = PREFIX + ".fieldvalue";
+}
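
A hedged sketch of building a field-analysis request with these constants. It assumes ModifiableSolrParams from the same org.apache.solr.common.params package, which is not part of this hunk:

    // Illustrative parameter set for a field analysis request.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(AnalysisParams.FIELD_NAME, "title");              // analysis.fieldname
    params.set(AnalysisParams.FIELD_VALUE, "A Quick Brown Fox"); // analysis.fieldvalue
    params.set(AnalysisParams.SHOW_MATCH, true);                 // analysis.showmatch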

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/AppendedSolrParams.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/params/AppendedSolrParams.java b/ranger_solrj/src/main/java/org/apache/solr/common/params/AppendedSolrParams.java
new file mode 100644
index 0000000..4c9cc2e
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/params/AppendedSolrParams.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+/**
+ * SolrParams wrapper which acts similar to DefaultSolrParams except that
+ * it "appends" the values of multi-value params from both sub instances, so
+ * that all of the values are returned. 
+ */
+public class AppendedSolrParams extends DefaultSolrParams {
+
+  public static AppendedSolrParams wrapAppended(SolrParams params, SolrParams extra) {
+    return new AppendedSolrParams(params, extra);
+  }
+
+  private AppendedSolrParams(SolrParams main, SolrParams extra) {
+    super(main, extra);
+  }
+
+  @Override
+  public String[] getParams(String param) {
+    String[] main = params.getParams(param);
+    String[] extra = defaults.getParams(param);
+    if (null == extra || 0 == extra.length) {
+      return main;
+    }
+    if (null == main || 0 == main.length) {
+      return extra;
+    }
+    String[] result = new String[main.length + extra.length];
+    System.arraycopy(main,0,result,0,main.length);
+    System.arraycopy(extra,0,result,main.length,extra.length);
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "{main("+params+"),extra("+defaults+")}";
+  }
+}
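
A short sketch of the append semantics implemented in getParams above (again assuming ModifiableSolrParams, which is not part of this hunk):

    // Multi-valued params from both sources are concatenated instead of one overriding the other.
    ModifiableSolrParams main = new ModifiableSolrParams();
    main.add("fq", "inStock:true");
    ModifiableSolrParams extra = new ModifiableSolrParams();
    extra.add("fq", "category:books");
    SolrParams merged = AppendedSolrParams.wrapAppended(main, extra);
    // merged.getParams("fq") now yields ["inStock:true", "category:books"]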

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/CollectionParams.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/params/CollectionParams.java b/ranger_solrj/src/main/java/org/apache/solr/common/params/CollectionParams.java
new file mode 100644
index 0000000..be8906e
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/params/CollectionParams.java
@@ -0,0 +1,74 @@
+package org.apache.solr.common.params;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Locale;
+
+public interface CollectionParams 
+{
+  /** What action **/
+  public final static String ACTION = "action";
+  public final static String NAME = "name";
+  
+
+
+  public enum CollectionAction {
+    CREATE,
+    DELETE,
+    RELOAD,
+    SYNCSHARD,
+    CREATEALIAS,
+    DELETEALIAS,
+    SPLITSHARD,
+    DELETESHARD,
+    CREATESHARD,
+    DELETEREPLICA,
+    MIGRATE,
+    ADDROLE,
+    REMOVEROLE,
+    CLUSTERPROP,
+    REQUESTSTATUS,
+    ADDREPLICA,
+    OVERSEERSTATUS,
+    LIST,
+    CLUSTERSTATUS,
+    ADDREPLICAPROP,
+    DELETEREPLICAPROP,
+    BALANCESHARDUNIQUE,
+    REBALANCELEADERS;
+    
+    public static CollectionAction get( String p )
+    {
+      if( p != null ) {
+        try {
+          return CollectionAction.valueOf( p.toUpperCase(Locale.ROOT) );
+        }
+        catch( Exception ex ) {}
+      }
+      return null; 
+    }
+    public boolean isEqual(String s){
+      if(s == null) return false;
+      return toString().equals(s.toUpperCase(Locale.ROOT));
+    }
+    public String toLower(){
+      return toString().toLowerCase(Locale.ROOT);
+    }
+
+  }
+}
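
A small sketch of CollectionAction's case-insensitive lookup helpers defined above:

    // get(...) upper-cases its input and returns null for unknown actions.
    CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get("createshard");
    System.out.println(action);                         // CREATESHARD
    System.out.println(action.toLower());               // "createshard"
    System.out.println(action.isEqual("CreateShard"));  // true
    System.out.println(CollectionParams.CollectionAction.get("bogus")); // null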

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/CommonParams.java
----------------------------------------------------------------------
diff --git a/ranger_solrj/src/main/java/org/apache/solr/common/params/CommonParams.java b/ranger_solrj/src/main/java/org/apache/solr/common/params/CommonParams.java
new file mode 100644
index 0000000..699059a
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/params/CommonParams.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+import java.util.Locale;
+
+
+/**
+ * Parameters used across many handlers
+ */
+public interface CommonParams {
+
+  /** 
+   * Override for the concept of "NOW" to be used throughout this request, 
+   * expressed as milliseconds since epoch.  This is primarily used in 
+   * distributed search to ensure consistent time values are used across 
+   * multiple sub-requests.
+   */
+  public static final String NOW = "NOW";
+
+  /** 
+   * Specifies the TimeZone used by the client for the purposes of 
+   * any DateMath rounding that may take place when executing the request
+   */
+  public static final String TZ = "TZ";
+
+  /** the Request Handler (formerly known as the Query Type) - which Request Handler should handle the request */
+  public static final String QT ="qt";
+  
+  /** the response writer type - the format of the response */
+  public static final String WT ="wt";
+  
+  /** query string */
+  public static final String Q ="q";
+
+  /** rank query */
+  public static final String RQ ="rq";
+  
+  /** distrib string */
+  public static final String DISTRIB = "distrib";
+  
+  /** sort order */
+  public static final String SORT ="sort";
+  
+  /** Lucene query string(s) for filtering the results without affecting scoring */
+  public static final String FQ ="fq";
+  
+  /** zero based offset of matching documents to retrieve */
+  public static final String START ="start";
+  
+  /** number of documents to return starting at "start" */
+  public static final String ROWS ="rows";
+
+  // SOLR-4228 start
+  /** handler value for SolrPing */
+  public static final String PING_HANDLER = "/admin/ping";
+  
+  /** "action" parameter for SolrPing */
+  public static final String ACTION = "action";
+  
+  /** "disable" value for SolrPing action */
+  public static final String DISABLE = "disable";
+  
+  /** "enable" value for SolrPing action */
+  public static final String ENABLE = "enable";
+  
+  /** "ping" value for SolrPing action */
+  public static final String PING = "ping";
+  // SOLR-4228 end
+
+  /** stylesheet to apply to XML results */
+  public static final String XSL ="xsl";
+  
+  /** version parameter to check request-response compatibility */
+  public static final String VERSION ="version";
+  
+  /** query and init param for field list */
+  public static final String FL = "fl";
+  
+  /** default query field */
+  public static final String DF = "df";
+
+  /** Transformer param -- used with XSLT */
+  public static final String TR = "tr";
+  
+  /** whether to include debug data for all components pieces, including doing explains*/
+  public static final String DEBUG_QUERY = "debugQuery";
+
+  /**
+   * Whether to provide debug info for specific items.
+   *
+   * @see #DEBUG_QUERY
+   */
+  public static final String DEBUG = "debug";
+
+  /**
+   * {@link #DEBUG} value indicating an interest in debug output related to timing
+   */
+  public static final String TIMING = "timing";
+  /**
+   * {@link #DEBUG} value indicating an interest in debug output related to the results (explains)
+   */
+  public static final String RESULTS = "results";
+  /**
+   * {@link #DEBUG} value indicating an interest in debug output related to the Query (parsing, etc.)
+   */
+  public static final String QUERY = "query";
+  /**
+   * {@link #DEBUG} value indicating an interest in debug output related to the distributed tracking
+   */
+  public static final String TRACK = "track";
+  /** 
+   * boolean indicating whether score explanations should be structured (true),
+   * or plain text (false)
+   */
+  public static final String EXPLAIN_STRUCT = "debug.explain.structured";
+  
+  /** another query to explain against */
+  public static final String EXPLAIN_OTHER = "explainOther";
+  
+
+  /** If the content stream should come from a URL (using URLConnection) */
+  public static final String STREAM_URL = "stream.url";
+
+  /** If the content stream should come from a File (using FileReader) */
+  public static final String STREAM_FILE = "stream.file";
+  
+  /** If the content stream should come directly from a field */
+  public static final String STREAM_BODY = "stream.body";
+  
+  /** 
+   * Explicitly set the content type for the input stream
+   * If multiple streams are specified, the explicit contentType
+   * will be used for all of them.  
+   */
+  public static final String STREAM_CONTENTTYPE = "stream.contentType";
+  
+  /**
+   * Timeout value in milliseconds.  If not set, or the value is &lt;= 0, there is no timeout.
+   */
+  public static final String TIME_ALLOWED = "timeAllowed";
+  
+  /** 'true' if the header should include the handler name */
+  public static final String HEADER_ECHO_HANDLER = "echoHandler";
+  
+  /** include the parameters in the header **/
+  public static final String HEADER_ECHO_PARAMS = "echoParams";
+
+  /** set to 'true' to omit the response header from the response */
+  public static final String OMIT_HEADER = "omitHeader";
+
+  /** valid values for: <code>echoParams</code> */
+  public enum EchoParamStyle {
+    EXPLICIT,
+    ALL,
+    NONE;
+    
+    public static EchoParamStyle get( String v ) {
+      if( v != null ) {
+        v = v.toUpperCase(Locale.ROOT);
+        if( v.equals( "EXPLICIT" ) ) {
+          return EXPLICIT;
+        }
+        if( v.equals( "ALL") ) {
+          return ALL;
+        }
+        if( v.equals( "NONE") ) {  // the same as nothing...
+          return NONE;
+        }
+      }
+      return null;
+    }
+  };
+
+  /** which parameters to log (if not supplied all parameters will be logged) **/
+  public static final String LOG_PARAMS_LIST = "logParamsList";
+
+  public static final String EXCLUDE = "ex";
+  public static final String TAG = "tag";
+  public static final String TERMS = "terms";
+  public static final String OUTPUT_KEY = "key";
+  public static final String FIELD = "f";
+  public static final String VALUE = "v";
+  public static final String THREADS = "threads";
+  public static final String TRUE = Boolean.TRUE.toString();
+  public static final String FALSE = Boolean.FALSE.toString();
+
+  /** Used as a local parameter on queries.  cache=false means don't check any query or filter caches.
+   * cache=true is the default.
+   */
+  public static final String CACHE = "cache";
+
+  /** Used as a local param on filter queries in conjunction with cache=false.  Filters are checked in order, from
+   * smallest cost to largest. If cost&gt;=100 and the query implements PostFilter, then that interface will be used to do post query filtering.
+   */
+  public static final String COST = "cost";
+
+  /**
+   * Request ID parameter added to the request when using debug=track
+   */
+  public static final String REQUEST_ID = "rid";
+
+  /**
+   * Request Purpose parameter added to each internal shard request when using debug=track
+   */
+  public static final String REQUEST_PURPOSE = "requestPurpose";
+
+  /**
+   * When querying a node, prefer local node's cores for distributed queries.
+   */
+  public static final String PREFER_LOCAL_SHARDS = "preferLocalShards";
+}
+
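For readers of this patch, a minimal sketch of how these constants are typically consumed from SolrJ, assuming the ModifiableSolrParams class from this same org.apache.solr.common.params package; the field names and values are invented for illustration:

    import org.apache.solr.common.params.CommonParams;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class CommonParamsExample {
      public static void main(String[] args) {
        // Build a query using the symbolic names instead of raw parameter strings.
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.Q, "service_name:solr");  // main query (hypothetical field)
        params.set(CommonParams.FQ, "enabled:true");      // filter query; does not affect scoring
        params.set(CommonParams.START, 0);                // zero-based offset
        params.set(CommonParams.ROWS, 20);                // page size
        params.set(CommonParams.FL, "id,name");           // field list to return
        params.set(CommonParams.SORT, "name asc");        // sort order
        System.out.println(params);                       // prints the URL-encoded parameter string
      }
    }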

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/CoreAdminParams.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/params/CoreAdminParams.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/CoreAdminParams.java
new file mode 100644
index 0000000..f673c6c
--- /dev/null
+++ 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/CoreAdminParams.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+import java.util.Locale;
+
+/**
+ * @since solr 1.3
+ */
+public abstract class CoreAdminParams
+{
+  /** What Core are we talking about **/
+  public final static String CORE = "core";
+
+  /** Should the STATUS request include index info **/
+  public final static String INDEX_INFO = "indexInfo";
+
+  /** Persistent -- should it save the cores state? **/
+  public final static String PERSISTENT = "persistent";
+  
+  /** If you rename something, what is the new name **/
+  public final static String NAME = "name";
+
+  /** Core data directory **/
+  public final static String DATA_DIR = "dataDir";
+
+  /** Core updatelog directory **/
+  public final static String ULOG_DIR = "ulogDir";
+
+  /** Name of the other core in actions involving 2 cores **/
+  public final static String OTHER = "other";
+
+  /** What action **/
+  public final static String ACTION = "action";
+  
+  /** If you specify a schema, what is its name **/
+  public final static String SCHEMA = "schema";
+
+  /** If you specify a configset, what is its name **/
+  public final static String CONFIGSET = "configSet";
+  
+  /** If you specify a config, what is its name **/
+  public final static String CONFIG = "config";
+  
+  /** Specifies a core instance dir. */
+  public final static String INSTANCE_DIR = "instanceDir";
+
+  /** If you specify a file, what is its name **/
+  public final static String FILE = "file";
+  
+  /** If you merge indexes, what are the index directories.
+   * The directories are specified by multiple indexDir parameters. */
+  public final static String INDEX_DIR = "indexDir";
+
+  /** If you merge indexes, what is the source core's name
+   * More than one source core can be specified by multiple srcCore parameters */
+  public final static String SRC_CORE = "srcCore";
+
+  /** The collection name in solr cloud */
+  public final static String COLLECTION = "collection";
+
+  /** The shard id in solr cloud */
+  public final static String SHARD = "shard";
+  
+  /** The shard range in solr cloud */
+  public final static String SHARD_RANGE = "shard.range";
+
+  /** The shard state in solr cloud */
+  public final static String SHARD_STATE = "shard.state";
+
+  /** The parent shard if applicable */
+  public final static String SHARD_PARENT = "shard.parent";
+
+  /** The target core to which a split index should be written
+   * Multiple targetCores can be specified by multiple targetCore parameters */
+  public final static String TARGET_CORE = "targetCore";
+
+  /** The hash ranges to be used to split a shard or an index */
+  public final static String RANGES = "ranges";
+  
+  public static final String ROLES = "roles";
+
+  public static final String REQUESTID = "requestid";
+
+  public static final String CORE_NODE_NAME = "coreNodeName";
+  
+  /** Prefix for core property name=value pair **/
+  public final static String PROPERTY_PREFIX = "property.";
+
+  /** If you unload a core, delete the index too */
+  public final static String DELETE_INDEX = "deleteIndex";
+
+  public static final String DELETE_DATA_DIR = "deleteDataDir";
+
+  public static final String DELETE_INSTANCE_DIR = "deleteInstanceDir";
+
+  public static final String LOAD_ON_STARTUP = "loadOnStartup";
+  
+  public static final String TRANSIENT = "transient";
+
+  public enum CoreAdminAction {
+    STATUS,  
+    LOAD,
+    UNLOAD,
+    RELOAD,
+    CREATE,
+    PERSIST,
+    SWAP,
+    RENAME,
+    MERGEINDEXES,
+    SPLIT,
+    PREPRECOVERY,
+    REQUESTRECOVERY, 
+    REQUESTSYNCSHARD,
+    CREATEALIAS,
+    DELETEALIAS,
+    REQUESTBUFFERUPDATES,
+    REQUESTAPPLYUPDATES,
+    LOAD_ON_STARTUP,
+    TRANSIENT,
+    OVERSEEROP,
+    REQUESTSTATUS,
+    REJOINLEADERELECTION;
+    
+    public static CoreAdminAction get( String p )
+    {
+      if( p != null ) {
+        try {
+          return CoreAdminAction.valueOf( p.toUpperCase(Locale.ROOT) );
+        }
+        catch( Exception ex ) {}
+      }
+      return null; 
+    }
+  }
+}
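As a rough illustration (not part of this patch), the lenient CoreAdminAction.get() and the parameter constants above might be combined like this when assembling a core-admin style request; ModifiableSolrParams is assumed from the same package and the core name is made up:

    import org.apache.solr.common.params.CoreAdminParams;
    import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class CoreAdminParamsExample {
      public static void main(String[] args) {
        // get() is case-insensitive and returns null for unknown action names.
        CoreAdminAction action = CoreAdminAction.get("reload");   // -> RELOAD
        CoreAdminAction unknown = CoreAdminAction.get("bogus");   // -> null

        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CoreAdminParams.ACTION, action.toString());    // action=RELOAD
        params.set(CoreAdminParams.CORE, "ranger_audits");        // hypothetical core name
        System.out.println(unknown == null ? "unknown action" : unknown.name());
        System.out.println(params);                               // action=RELOAD&core=ranger_audits
      }
    }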

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/CursorMarkParams.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/params/CursorMarkParams.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/CursorMarkParams.java
new file mode 100644
index 0000000..c20cfd9
--- /dev/null
+++ 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/CursorMarkParams.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+/**
+ * Parameters and constants used when dealing with cursor based requests across
+ * large sorted result sets.
+ */
+public interface CursorMarkParams {
+
+  /**
+   * Param clients should specify indicating that they want a cursor based search.
+   * The value specified must either be {@link #CURSOR_MARK_START} indicating the
+   * first page of results, or a value returned by a previous search via the 
+   * {@link #CURSOR_MARK_NEXT} key.
+   */
+  public static final String CURSOR_MARK_PARAM = "cursorMark";
+
+  /**
+   * Key used in Solr response to inform the client what the "next" 
+   * {@link #CURSOR_MARK_PARAM} value should be to continue pagination
+   */
+  public static final String CURSOR_MARK_NEXT = "nextCursorMark";
+
+  /** 
+   * Special value for {@link #CURSOR_MARK_PARAM} indicating that cursor functionality
+   * should be used, and a new cursor value should be computed after the last result,
+   * but that currently the "first page" of results is being requested
+   */
+  public static final String CURSOR_MARK_START = "*";
+
+}
+
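A hedged sketch of the cursor loop these constants describe; the search() helper is a stand-in for whatever client call actually executes the request and returns the response's nextCursorMark value, and the sort field is illustrative:

    import org.apache.solr.common.params.CommonParams;
    import org.apache.solr.common.params.CursorMarkParams;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class CursorMarkExample {
      // Placeholder (assumption): executes the request and returns "nextCursorMark" from the response.
      static String search(ModifiableSolrParams params) { return CursorMarkParams.CURSOR_MARK_START; }

      public static void main(String[] args) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.Q, "*:*");
        params.set(CommonParams.ROWS, 100);
        // Cursor paging needs a deterministic sort that ends on the uniqueKey field.
        params.set(CommonParams.SORT, "id asc");

        String cursor = CursorMarkParams.CURSOR_MARK_START;     // "*" = first page
        while (true) {
          params.set(CursorMarkParams.CURSOR_MARK_PARAM, cursor);
          String next = search(params);
          if (cursor.equals(next)) {
            break;                                               // mark did not advance: no more results
          }
          cursor = next;
        }
      }
    }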

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/DefaultSolrParams.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/params/DefaultSolrParams.java
 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/DefaultSolrParams.java
new file mode 100644
index 0000000..1f308cd
--- /dev/null
+++ 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/DefaultSolrParams.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+
+/**
+ *
+ */
+public class DefaultSolrParams extends SolrParams {
+
+  protected final SolrParams params;
+  protected final SolrParams defaults;
+
+  protected DefaultSolrParams(SolrParams params, SolrParams defaults) {
+    assert params != null && defaults != null;
+    this.params = params;
+    this.defaults = defaults;
+  }
+
+  @Override
+  public String get(String param) {
+    String val = params.get(param);
+    return val!=null ? val : defaults.get(param);
+  }
+
+  @Override
+  public String[] getParams(String param) {
+    String[] vals = params.getParams(param);
+    return vals!=null ? vals : defaults.getParams(param);
+  }
+
+  @Override
+  public Iterator<String> getParameterNamesIterator() {
+    // We need to compute the set of all param names in advance 
+    // So we don't wind up with an iterator that returns the same
+    // String more than once (SOLR-6780)
+    LinkedHashSet<String> allKeys = new LinkedHashSet<>();
+    for (SolrParams p : new SolrParams [] {params, defaults}) {
+      Iterator<String> localKeys = p.getParameterNamesIterator();
+      while (localKeys.hasNext()) {
+        allKeys.add(localKeys.next());
+      }
+    }
+    return allKeys.iterator();
+  }
+
+  @Override
+  public String toString() {
+    return "{params("+params+"),defaults("+defaults+")}";
+  }
+}
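The constructor above is protected, so callers normally obtain this class through a factory; here is a small sketch of the fallback behaviour, assuming the SolrParams.wrapDefaults(params, defaults) helper that stock SolrJ provides, with arbitrary parameter names:

    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.params.SolrParams;

    public class DefaultSolrParamsExample {
      public static void main(String[] args) {
        ModifiableSolrParams request = new ModifiableSolrParams();
        request.set("q", "name:foo");     // explicit request value wins

        ModifiableSolrParams defaults = new ModifiableSolrParams();
        defaults.set("q", "*:*");         // masked by the request value
        defaults.set("rows", 10);         // only present in the defaults

        // wrapDefaults is assumed to return a DefaultSolrParams view over the two sets.
        SolrParams merged = SolrParams.wrapDefaults(request, defaults);
        System.out.println(merged.get("q"));     // name:foo
        System.out.println(merged.get("rows"));  // 10
      }
    }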

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/DisMaxParams.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/params/DisMaxParams.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/DisMaxParams.java
new file mode 100644
index 0000000..c2c268e
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/params/DisMaxParams.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+    
+
+/**
+ * A collection of params used in DisMaxRequestHandler,
+ * both for Plugin initialization and for Requests.
+ */
+public interface DisMaxParams {
+  
+  /** query and init param for tiebreaker value */
+  public static String TIE = "tie";
+  
+  /** query and init param for query fields */
+  public static String QF = "qf";
+  
+  /** query and init param for phrase boost fields */
+  public static String PF = "pf";
+  
+  /** query and init param for bigram phrase boost fields */
+  public static String PF2 = "pf2";
+  
+  /** query and init param for trigram phrase boost fields */
+  public static String PF3 = "pf3";
+  
+  /** query and init param for MinShouldMatch specification */
+  public static String MM = "mm";
+  
+  /**
+   * query and init param for Phrase Slop value in phrase
+   * boost query (in pf fields)
+   */
+  public static String PS = "ps";
+  
+  /** default phrase slop for bigram phrases (pf2)  */
+  public static String PS2 = "ps2";
+  
+  /** default phrase slop for trigram phrases (pf3)  */
+  public static String PS3 = "ps3";
+    
+  /**
+   * query and init param for phrase Slop value in phrases
+   * explicitly included in the user's query string ( in qf fields)
+   */
+  public static String QS = "qs";
+  
+  /** query and init param for boosting query */
+  public static String BQ = "bq";
+  
+  /** query and init param for boosting functions */
+  public static String BF = "bf";
+  
+  /**
+   * Alternate query (expressed in Solr QuerySyntax)
+   * to use if main query (q) is empty
+   */
+  public static String ALTQ = "q.alt";
+  
+  /** query and init param for field list */
+  public static String GEN = "gen";
+}
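For context, a minimal sketch of a dismax-style request built from these names; defType and the boost values are illustrative and not defined in this file:

    import org.apache.solr.common.params.CommonParams;
    import org.apache.solr.common.params.DisMaxParams;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class DisMaxParamsExample {
      public static void main(String[] args) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("defType", "dismax");                  // select the dismax query parser
        params.set(CommonParams.Q, "apache ranger");      // free-text user query
        params.set(DisMaxParams.QF, "title^10 body");     // query fields with boosts
        params.set(DisMaxParams.PF, "title^20");          // phrase boost fields
        params.set(DisMaxParams.MM, "2<75%");             // min-should-match specification
        params.set(DisMaxParams.TIE, "0.1");              // tiebreaker between field scores
        params.set(DisMaxParams.ALTQ, "*:*");             // fallback when q is empty
        System.out.println(params);
      }
    }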

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/EventParams.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/params/EventParams.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/EventParams.java
new file mode 100644
index 0000000..f5d43c0
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/params/EventParams.java
@@ -0,0 +1,29 @@
+package org.apache.solr.common.params;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ *
+ *
+ **/
+public interface EventParams {
+  /** Event param for things like newSearcher, firstSearcher**/
+  public static final String EVENT = "event";
+  public static final String NEW_SEARCHER = "newSearcher";
+  public static final String FIRST_SEARCHER = "firstSearcher";
+}
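A tiny illustrative comparison against these constants, purely to show their intended values (the incoming event name is hypothetical):

    import org.apache.solr.common.params.EventParams;

    public class EventParamsExample {
      public static void main(String[] args) {
        String event = "newSearcher";                     // value that would arrive via the "event" param
        if (EventParams.NEW_SEARCHER.equals(event)) {
          System.out.println("warming a new searcher");
        } else if (EventParams.FIRST_SEARCHER.equals(event)) {
          System.out.println("warming the first searcher");
        }
      }
    }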

http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/40aa090d/ranger_solrj/src/main/java/org/apache/solr/common/params/ExpandParams.java
----------------------------------------------------------------------
diff --git 
a/ranger_solrj/src/main/java/org/apache/solr/common/params/ExpandParams.java 
b/ranger_solrj/src/main/java/org/apache/solr/common/params/ExpandParams.java
new file mode 100644
index 0000000..a8f0cf7
--- /dev/null
+++ b/ranger_solrj/src/main/java/org/apache/solr/common/params/ExpandParams.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.params;
+
+/**
+ * Expand parameters
+ */
+public interface ExpandParams {
+
+  public static final String EXPAND = "expand";
+  public static final String EXPAND_SORT = EXPAND + ".sort";
+  public static final String EXPAND_ROWS = EXPAND + ".rows";
+  public static final String EXPAND_FIELD = EXPAND + ".field";
+  public static final String EXPAND_Q = EXPAND + ".q";
+  public static final String EXPAND_FQ = EXPAND + ".fq";
+}
+
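As a loose sketch (the collapse field and values are invented), these parameters usually accompany a collapsing filter query so the collapsed group members can be returned in a separate expanded section:

    import org.apache.solr.common.params.CommonParams;
    import org.apache.solr.common.params.ExpandParams;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class ExpandParamsExample {
      public static void main(String[] args) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.Q, "*:*");
        // Collapse the main result set on a (hypothetical) group_id field...
        params.set(CommonParams.FQ, "{!collapse field=group_id}");
        // ...and ask for the collapsed members back per group.
        params.set(ExpandParams.EXPAND, "true");
        params.set(ExpandParams.EXPAND_ROWS, 5);             // members returned per group
        params.set(ExpandParams.EXPAND_SORT, "score desc");  // order of members within each group
        System.out.println(params);
      }
    }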
