http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PSchemaProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PSchemaProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PSchemaProtos.java
new file mode 100644
index 0000000..d7dc720
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PSchemaProtos.java
@@ -0,0 +1,666 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: PSchema.proto
+
+package org.apache.phoenix.coprocessor.generated;
+
+public final class PSchemaProtos {
+  private PSchemaProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface PSchemaOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string schemaName = 1;
+    /**
+     * <code>required string schemaName = 1;</code>
+     */
+    boolean hasSchemaName();
+    /**
+     * <code>required string schemaName = 1;</code>
+     */
+    java.lang.String getSchemaName();
+    /**
+     * <code>required string schemaName = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getSchemaNameBytes();
+
+    // required int64 timeStamp = 2;
+    /**
+     * <code>required int64 timeStamp = 2;</code>
+     */
+    boolean hasTimeStamp();
+    /**
+     * <code>required int64 timeStamp = 2;</code>
+     */
+    long getTimeStamp();
+  }
+  /**
+   * Protobuf type {@code PSchema}
+   */
+  public static final class PSchema extends
+      com.google.protobuf.GeneratedMessage
+      implements PSchemaOrBuilder {
+    // Use PSchema.newBuilder() to construct.
+    private PSchema(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private PSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final PSchema defaultInstance;
+    public static PSchema getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public PSchema getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private PSchema(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              schemaName_ = input.readBytes();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              timeStamp_ = input.readInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.phoenix.coprocessor.generated.PSchemaProtos.internal_static_PSchema_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.phoenix.coprocessor.generated.PSchemaProtos.internal_static_PSchema_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.class, org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<PSchema> PARSER =
+        new com.google.protobuf.AbstractParser<PSchema>() {
+      public PSchema parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new PSchema(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<PSchema> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string schemaName = 1;
+    public static final int SCHEMANAME_FIELD_NUMBER = 1;
+    private java.lang.Object schemaName_;
+    /**
+     * <code>required string schemaName = 1;</code>
+     */
+    public boolean hasSchemaName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string schemaName = 1;</code>
+     */
+    public java.lang.String getSchemaName() {
+      java.lang.Object ref = schemaName_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          schemaName_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string schemaName = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getSchemaNameBytes() {
+      java.lang.Object ref = schemaName_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        schemaName_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required int64 timeStamp = 2;
+    public static final int TIMESTAMP_FIELD_NUMBER = 2;
+    private long timeStamp_;
+    /**
+     * <code>required int64 timeStamp = 2;</code>
+     */
+    public boolean hasTimeStamp() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required int64 timeStamp = 2;</code>
+     */
+    public long getTimeStamp() {
+      return timeStamp_;
+    }
+
+    private void initFields() {
+      schemaName_ = "";
+      timeStamp_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasSchemaName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasTimeStamp()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getSchemaNameBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeInt64(2, timeStamp_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getSchemaNameBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(2, timeStamp_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema)) {
+        return super.equals(obj);
+      }
+      org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema other = (org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema) obj;
+
+      boolean result = true;
+      result = result && (hasSchemaName() == other.hasSchemaName());
+      if (hasSchemaName()) {
+        result = result && getSchemaName()
+            .equals(other.getSchemaName());
+      }
+      result = result && (hasTimeStamp() == other.hasTimeStamp());
+      if (hasTimeStamp()) {
+        result = result && (getTimeStamp()
+            == other.getTimeStamp());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSchemaName()) {
+        hash = (37 * hash) + SCHEMANAME_FIELD_NUMBER;
+        hash = (53 * hash) + getSchemaName().hashCode();
+      }
+      if (hasTimeStamp()) {
+        hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getTimeStamp());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code PSchema}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchemaOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.phoenix.coprocessor.generated.PSchemaProtos.internal_static_PSchema_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.phoenix.coprocessor.generated.PSchemaProtos.internal_static_PSchema_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.class, org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.Builder.class);
+      }
+
+      // Construct using org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        schemaName_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        timeStamp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.phoenix.coprocessor.generated.PSchemaProtos.internal_static_PSchema_descriptor;
+      }
+
+      public org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema getDefaultInstanceForType() {
+        return org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.getDefaultInstance();
+      }
+
+      public org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema build() {
+        org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema buildPartial() {
+        org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema result = new org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.schemaName_ = schemaName_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.timeStamp_ = timeStamp_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema) {
+          return mergeFrom((org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema other) {
+        if (other == org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.getDefaultInstance()) return this;
+        if (other.hasSchemaName()) {
+          bitField0_ |= 0x00000001;
+          schemaName_ = other.schemaName_;
+          onChanged();
+        }
+        if (other.hasTimeStamp()) {
+          setTimeStamp(other.getTimeStamp());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasSchemaName()) {
+          
+          return false;
+        }
+        if (!hasTimeStamp()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string schemaName = 1;
+      private java.lang.Object schemaName_ = "";
+      /**
+       * <code>required string schemaName = 1;</code>
+       */
+      public boolean hasSchemaName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string schemaName = 1;</code>
+       */
+      public java.lang.String getSchemaName() {
+        java.lang.Object ref = schemaName_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          schemaName_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string schemaName = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getSchemaNameBytes() {
+        java.lang.Object ref = schemaName_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          schemaName_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string schemaName = 1;</code>
+       */
+      public Builder setSchemaName(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        schemaName_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string schemaName = 1;</code>
+       */
+      public Builder clearSchemaName() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        schemaName_ = getDefaultInstance().getSchemaName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string schemaName = 1;</code>
+       */
+      public Builder setSchemaNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        schemaName_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required int64 timeStamp = 2;
+      private long timeStamp_ ;
+      /**
+       * <code>required int64 timeStamp = 2;</code>
+       */
+      public boolean hasTimeStamp() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required int64 timeStamp = 2;</code>
+       */
+      public long getTimeStamp() {
+        return timeStamp_;
+      }
+      /**
+       * <code>required int64 timeStamp = 2;</code>
+       */
+      public Builder setTimeStamp(long value) {
+        bitField0_ |= 0x00000002;
+        timeStamp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int64 timeStamp = 2;</code>
+       */
+      public Builder clearTimeStamp() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        timeStamp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:PSchema)
+    }
+
+    static {
+      defaultInstance = new PSchema(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:PSchema)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_PSchema_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_PSchema_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\rPSchema.proto\"0\n\007PSchema\022\022\n\nschemaName" +
+      "\030\001 \002(\t\022\021\n\ttimeStamp\030\002 
\002(\003BA\n(org.apache." +
+      "phoenix.coprocessor.generatedB\rPSchemaPr" +
+      "otosH\001\210\001\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner 
assigner =
+      new 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_PSchema_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_PSchema_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_PSchema_descriptor,
+              new java.lang.String[] { "SchemaName", "TimeStamp", });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
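
The generated PSchema message above follows the standard protobuf 2.x pattern: a required schemaName string, a required timeStamp long, a Builder, and the usual parseFrom overloads. A minimal round-trip sketch using only the API generated in this file (field values are illustrative):

    import org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema;

    // Both fields are required, so build() throws an
    // UninitializedMessageException if either is left unset.
    PSchema schema = PSchema.newBuilder()
        .setSchemaName("MY_SCHEMA")               // illustrative value
        .setTimeStamp(System.currentTimeMillis())
        .build();

    // Serialize and re-parse, as a client and the MetaData endpoint would.
    byte[] wire = schema.toByteArray();
    PSchema parsed = PSchema.parseFrom(wire);
    assert parsed.getSchemaName().equals("MY_SCHEMA");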

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
index 9fdfe51..126c0dd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
@@ -3338,6 +3338,16 @@ public final class PTableProtos {
      * <code>optional int64 indexDisableTimestamp = 29;</code>
      */
     long getIndexDisableTimestamp();
+
+    // optional bool isNamespaceMapped = 30;
+    /**
+     * <code>optional bool isNamespaceMapped = 30;</code>
+     */
+    boolean hasIsNamespaceMapped();
+    /**
+     * <code>optional bool isNamespaceMapped = 30;</code>
+     */
+    boolean getIsNamespaceMapped();
   }
   /**
    * Protobuf type {@code PTable}
@@ -3553,6 +3563,11 @@ public final class PTableProtos {
               indexDisableTimestamp_ = input.readInt64();
               break;
             }
+            case 240: {
+              bitField0_ |= 0x02000000;
+              isNamespaceMapped_ = input.readBool();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4163,6 +4178,22 @@ public final class PTableProtos {
       return indexDisableTimestamp_;
     }
 
+    // optional bool isNamespaceMapped = 30;
+    public static final int ISNAMESPACEMAPPED_FIELD_NUMBER = 30;
+    private boolean isNamespaceMapped_;
+    /**
+     * <code>optional bool isNamespaceMapped = 30;</code>
+     */
+    public boolean hasIsNamespaceMapped() {
+      return ((bitField0_ & 0x02000000) == 0x02000000);
+    }
+    /**
+     * <code>optional bool isNamespaceMapped = 30;</code>
+     */
+    public boolean getIsNamespaceMapped() {
+      return isNamespaceMapped_;
+    }
+
     private void initFields() {
       schemaNameBytes_ = com.google.protobuf.ByteString.EMPTY;
       tableNameBytes_ = com.google.protobuf.ByteString.EMPTY;
@@ -4193,6 +4224,7 @@ public final class PTableProtos {
       transactional_ = false;
       updateCacheFrequency_ = 0L;
       indexDisableTimestamp_ = 0L;
+      isNamespaceMapped_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -4347,6 +4379,9 @@ public final class PTableProtos {
       if (((bitField0_ & 0x01000000) == 0x01000000)) {
         output.writeInt64(29, indexDisableTimestamp_);
       }
+      if (((bitField0_ & 0x02000000) == 0x02000000)) {
+        output.writeBool(30, isNamespaceMapped_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -4477,6 +4512,10 @@ public final class PTableProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeInt64Size(29, indexDisableTimestamp_);
       }
+      if (((bitField0_ & 0x02000000) == 0x02000000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(30, isNamespaceMapped_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -4633,6 +4672,11 @@ public final class PTableProtos {
         result = result && (getIndexDisableTimestamp()
             == other.getIndexDisableTimestamp());
       }
+      result = result && (hasIsNamespaceMapped() == other.hasIsNamespaceMapped());
+      if (hasIsNamespaceMapped()) {
+        result = result && (getIsNamespaceMapped()
+            == other.getIsNamespaceMapped());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -4762,6 +4806,10 @@ public final class PTableProtos {
         hash = (37 * hash) + INDEXDISABLETIMESTAMP_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getIndexDisableTimestamp());
       }
+      if (hasIsNamespaceMapped()) {
+        hash = (37 * hash) + ISNAMESPACEMAPPED_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getIsNamespaceMapped());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -4944,6 +4992,8 @@ public final class PTableProtos {
         bitField0_ = (bitField0_ & ~0x08000000);
         indexDisableTimestamp_ = 0L;
         bitField0_ = (bitField0_ & ~0x10000000);
+        isNamespaceMapped_ = false;
+        bitField0_ = (bitField0_ & ~0x20000000);
         return this;
       }
 
@@ -5104,6 +5154,10 @@ public final class PTableProtos {
           to_bitField0_ |= 0x01000000;
         }
         result.indexDisableTimestamp_ = indexDisableTimestamp_;
+        if (((from_bitField0_ & 0x20000000) == 0x20000000)) {
+          to_bitField0_ |= 0x02000000;
+        }
+        result.isNamespaceMapped_ = isNamespaceMapped_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -5285,6 +5339,9 @@ public final class PTableProtos {
         if (other.hasIndexDisableTimestamp()) {
           setIndexDisableTimestamp(other.getIndexDisableTimestamp());
         }
+        if (other.hasIsNamespaceMapped()) {
+          setIsNamespaceMapped(other.getIsNamespaceMapped());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -7054,6 +7111,39 @@ public final class PTableProtos {
         return this;
       }
 
+      // optional bool isNamespaceMapped = 30;
+      private boolean isNamespaceMapped_ ;
+      /**
+       * <code>optional bool isNamespaceMapped = 30;</code>
+       */
+      public boolean hasIsNamespaceMapped() {
+        return ((bitField0_ & 0x20000000) == 0x20000000);
+      }
+      /**
+       * <code>optional bool isNamespaceMapped = 30;</code>
+       */
+      public boolean getIsNamespaceMapped() {
+        return isNamespaceMapped_;
+      }
+      /**
+       * <code>optional bool isNamespaceMapped = 30;</code>
+       */
+      public Builder setIsNamespaceMapped(boolean value) {
+        bitField0_ |= 0x20000000;
+        isNamespaceMapped_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool isNamespaceMapped = 30;</code>
+       */
+      public Builder clearIsNamespaceMapped() {
+        bitField0_ = (bitField0_ & ~0x20000000);
+        isNamespaceMapped_ = false;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:PTable)
     }
 
@@ -7101,7 +7191,7 @@ public final class PTableProtos {
       "\016\n\006values\030\002 
\003(\014\022\033\n\023guidePostsByteCount\030\003",
       " \001(\003\022\025\n\rkeyBytesCount\030\004 
\001(\003\022\027\n\017guidePost" +
       "sCount\030\005 \001(\005\022!\n\013pGuidePosts\030\006 
\001(\0132\014.PGui" +
-      "dePosts\"\303\005\n\006PTable\022\027\n\017schemaNameBytes\030\001 " +
+      "dePosts\"\336\005\n\006PTable\022\027\n\017schemaNameBytes\030\001 " +
       "\002(\014\022\026\n\016tableNameBytes\030\002 
\002(\014\022\036\n\ttableType" +
       "\030\003 \002(\0162\013.PTableType\022\022\n\nindexState\030\004 
\001(\t\022" +
       "\026\n\016sequenceNumber\030\005 \002(\003\022\021\n\ttimeStamp\030\006 
\002" +
@@ -7118,11 +7208,11 @@ public final class PTableProtos {
       "storeNulls\030\030 \001(\010\022\027\n\017baseColumnCount\030\031 
\001(" +
       "\005\022\036\n\026rowKeyOrderOptimizable\030\032 
\001(\010\022\025\n\rtra" +
       "nsactional\030\033 \001(\010\022\034\n\024updateCacheFrequency" +
-      "\030\034 \001(\003\022\035\n\025indexDisableTimestamp\030\035 
\001(\003*A\n",
-      
"\nPTableType\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIE"
 +
-      
"W\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apache.p" 
+
-      "hoenix.coprocessor.generatedB\014PTableProt" +
-      "osH\001\210\001\001\240\001\001"
+      "\030\034 \001(\003\022\035\n\025indexDisableTimestamp\030\035 
\001(\003\022\031\n",
+      "\021isNamespaceMapped\030\036 \001(\010*A\n\nPTableType\022\n" +
+      
"\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDEX\020"
 +
+      "\003\022\010\n\004JOIN\020\004B@\n(org.apache.phoenix.coproc" +
+      "essor.generatedB\014PTableProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner 
assigner =
       new 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -7146,7 +7236,7 @@ public final class PTableProtos {
           internal_static_PTable_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_PTable_descriptor,
-              new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", });
+              new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", });
           return null;
         }
       };
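
The PTable changes above thread a single new optional field, isNamespaceMapped (tag 30, a bool, hence case 240 in the parser: (30 << 3) | 0), through the parser, serializer, equals/hashCode, builder, and descriptor string. A sketch of the optional-field idiom on the regenerated API (builder setup abbreviated; a complete PTable requires all of its required fields):

    import org.apache.phoenix.coprocessor.generated.PTableProtos;

    PTableProtos.PTable.Builder builder = PTableProtos.PTable.newBuilder();
    // ... set schemaNameBytes, tableNameBytes, tableType, and the other
    // required fields here ...
    builder.setIsNamespaceMapped(true);
    // buildPartial() skips the required-field check, which keeps this
    // sketch self-contained; real callers use build().
    PTableProtos.PTable table = builder.buildPartial();

    // Check presence before reading: a message written by an older client
    // never carries field 30, and the getter would return the default false.
    boolean mapped = table.hasIsNamespaceMapped() && table.getIsNamespaceMapped();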

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 65cb6db..9ac05a4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -33,6 +33,8 @@ import org.apache.phoenix.schema.ConcurrentTableMutationException;
 import org.apache.phoenix.schema.FunctionAlreadyExistsException;
 import org.apache.phoenix.schema.FunctionNotFoundException;
 import org.apache.phoenix.schema.ReadOnlyTableException;
+import org.apache.phoenix.schema.SchemaAlreadyExistsException;
+import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
@@ -374,8 +376,23 @@ public enum SQLExceptionCode {
     }),
     UNALLOWED_USER_DEFINED_FUNCTIONS(6003, "42F03",
             "User defined functions are configured to not be allowed. To allow 
configure "
-                    + QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB + " to 
true."),
-    ;
+                    + QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB + " to 
true."), 
+
+    SCHEMA_ALREADY_EXISTS(721, "42M04", "Schema with given name already 
exists", new Factory() {
+        @Override
+        public SQLException newException(SQLExceptionInfo info) {
+            return new SchemaAlreadyExistsException(info.getSchemaName());
+        }
+    }), SCHEMA_NOT_FOUND(722, "43M05", "Schema does not exists", new Factory() 
{
+        @Override
+        public SQLException newException(SQLExceptionInfo info) {
+            return new SchemaNotFoundException(info.getSchemaName());
+        }
+    }), CANNOT_MUTATE_SCHEMA(723, "43M06", "Cannot mutate schema as schema has 
existing tables"), SCHEMA_NOT_ALLOWED(
+            724, "43M07",
+            "Schema name not allowed!!"), CREATE_SCHEMA_NOT_ALLOWED(725, 
"43M08", "Cannot create schema because config "
+                    + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " for 
enabling name space mapping isn't enabled.");
+
 
     private final int errorCode;
     private final String sqlState;
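
The five new codes (721-725) extend the enum's existing Factory pattern: codes constructed with a Factory surface as typed exceptions rather than a plain SQLException. A sketch of raising SCHEMA_NOT_FOUND through the usual SQLExceptionInfo builder (call shape mirrors existing Phoenix call sites for SequenceNotFoundException and friends):

    import org.apache.phoenix.exception.SQLExceptionCode;
    import org.apache.phoenix.exception.SQLExceptionInfo;

    // buildException() dispatches to the Factory registered above, so the
    // caller receives a SchemaNotFoundException carrying the schema name.
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.SCHEMA_NOT_FOUND)
            .setSchemaName("MY_SCHEMA") // illustrative value
            .build().buildException();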

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
index 1da5aff..a014da2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/master/IndexMasterObserver.java
@@ -87,11 +87,9 @@ public class IndexMasterObserver extends BaseMasterObserver {
             AssignmentManager am = ctx.getEnvironment().getMasterServices().getAssignmentManager();
             RegionStates regionStates = am.getRegionStates();
             String tableName = region.getTable().getNameAsString();
-            String correspondingTable =
-                    region.getTable().getNameAsString()
-                            .startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX) ? MetaDataUtil
-                            .getUserTableName(tableName) : MetaDataUtil
-                            .getLocalIndexTableName(tableName);
+            String correspondingTable = MetaDataUtil.isLocalIndex(region.getTable().getNameAsString())
+                    ? MetaDataUtil.getUserTableName(tableName)
+                    : Bytes.toString(MetaDataUtil.getLocalIndexPhysicalName(tableName.getBytes()));
             List<HRegionInfo> regions =
                     regionStates.getRegionsOfTable(TableName.valueOf(correspondingTable));
             for (HRegionInfo hri : regions) {
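
This hunk, and the two committer hunks below, replace the inlined startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX) test with MetaDataUtil.isLocalIndex. The method body is not part of this diff; presumably it centralizes at least the old prefix check, roughly:

    // Hypothetical reconstruction -- the real method lives in MetaDataUtil and
    // likely also accounts for namespace-mapped physical names, where the
    // local-index prefix no longer sits at the start of the string.
    public static boolean isLocalIndex(String physicalName) {
        return physicalName.contains(LOCAL_INDEX_TABLE_PREFIX);
    }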

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 56bf637..233dc57 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -150,7 +150,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                         // index is pretty hacky. If we're going to keep this, we should revisit that
                         // as well.
                         try {
-                            if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
+                            if (MetaDataUtil.isLocalIndex(tableReference.getTableName())) {
                                 Region indexRegion = IndexUtil.getIndexRegion(env);
                                 if (indexRegion != null) {
                                     throwFailureIfDone();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index b1b2656..14768ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -154,7 +154,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                             // Also, checking the prefix of the table name to determine if this is a local
                             // index is pretty hacky. If we're going to keep this, we should revisit that
                             // as well.
-                            if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
+                            if (MetaDataUtil.isLocalIndex(tableReference.getTableName())) {
                                 Region indexRegion = IndexUtil.getIndexRegion(env);
                                 if (indexRegion != null) {
                                     throwFailureIfDone();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index c7ed49b..ea245cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -139,7 +138,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             }
 
             // its a local index table, so we need to convert it to the index table names we should disable
-            if (ref.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
+            if (MetaDataUtil.isLocalIndex(ref.getTableName())) {
                 for (String tableName : getLocalIndexNames(ref, mutations)) {
                     indexTableNames.put(tableName, minTimeStamp);
                 }
@@ -154,9 +153,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             long minTimeStamp = tableTimeElement.getValue();
             // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
             byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
-            HTableInterface
-                    systemTable =
-                    env.getTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES));
+            HTableInterface systemTable = env.getTable(SchemaUtil
+                    .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
             // Mimic the Put that gets generated by the client on an update of the index state
             Put put = new Put(indexTableKey);
             if (blockWriteRebuildIndex)
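
The SYSTEM.CATALOG handle is now obtained through SchemaUtil.getPhysicalTableName instead of a hard-coded TableName, so the disable-index Put lands on the correct physical table whether or not system tables are namespace mapped. Illustratively (the SYSTEM:CATALOG form is the assumed namespace-mapped name):

    // Namespace mapping off (default): resolves to "SYSTEM.CATALOG".
    // Namespace mapping on: presumably resolves to "SYSTEM:CATALOG", i.e. the
    // CATALOG table inside the SYSTEM HBase namespace.
    TableName physical = SchemaUtil.getPhysicalTableName(
            PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration());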

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index a17258a..a78565d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -74,7 +74,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 
         @Override
         public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName) throws SQLException {
-            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
             return new ChunkedResultIterator(delegateFactory, mutationState, context, tableRef, scan, 
                     mutationState.getConnection().getQueryServices().getProps().getLong(
                                 QueryServices.SCAN_RESULT_CHUNK_SIZE,
@@ -93,7 +93,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
         // Instantiate single chunk iterator and the delegate iterator in constructor
         // to get parallel scans kicked off in separate threads. If we delay this,
         // we'll get serialized behavior (see PHOENIX-
-        if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+        if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
         ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(scanner, chunkSize);
         String tableName = tableRef.getTable().getPhysicalName().getString();
         resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName);
@@ -124,7 +124,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
             resultIterator.close();
             scan = ScanUtil.newScan(scan);
             scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
-            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+            if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
             String tableName = tableRef.getTable().getPhysicalName().getString();
             long renewLeaseThreshold = context.getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
             ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(
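
These logging hunks, like the ParallelIterators and SerialIterators ones below, switch from the logical table name to the physical one, since namespace mapping lets the two diverge. Under the convention this patch set introduces (separator assumed):

    PTable table = tableRef.getTable();
    table.getName().getString();          // logical name, e.g. "MY_SCHEMA.MY_TABLE"
    table.getPhysicalName().getString();  // physical HBase name; presumably
                                          // "MY_SCHEMA:MY_TABLE" when namespace
                                          // mapped, otherwise identical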

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 1351735..ca0eba0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -130,7 +130,7 @@ public class ParallelIterators extends BaseResultIterators {
                 public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                     return taskMetrics;
                 }
-            }, "Parallel scanner for table: " + 
tableRef.getTable().getName().getString()));
+            }, "Parallel scanner for table: " + 
tableRef.getTable().getPhysicalName().getString()));
             // Add our future in the right place so that we can concatenate the
             // results of the inner futures versus merge sorting across all of 
them.
             
nestedFutures.get(scanLocation.getOuterListIndex()).set(scanLocation.getInnerListIndex(),
 new Pair<Scan,Future<PeekingResultIterator>>(scan,future));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
index f3b9e7d..17c2279 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
@@ -115,7 +115,7 @@ public class SerialIterators extends BaseResultIterators {
                 public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                     return taskMetrics;
                 }
-            }, "Serial scanner for table: " + 
tableRef.getTable().getName().getString()));
+            }, "Serial scanner for table: " + 
tableRef.getTable().getPhysicalName().getString()));
             // Add our singleton Future which will execute serially
             nestedFutures.add(Collections.singletonList(new 
Pair<Scan,Future<PeekingResultIterator>>(overallScan,future)));
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index ab4fe35..69ce035 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -75,6 +75,7 @@ import org.apache.phoenix.iterate.TableResultIterator;
 import org.apache.phoenix.iterate.TableResultIteratorFactory;
 import org.apache.phoenix.jdbc.PhoenixStatement.PhoenixStatementParser;
 import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.DelegateConnectionQueryServices;
 import org.apache.phoenix.query.MetaDataMutated;
@@ -89,6 +90,7 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableRef;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.schema.types.PDataType;
@@ -134,6 +136,7 @@ import com.google.common.collect.Lists;
  */
 public class PhoenixConnection implements Connection, MetaDataMutated, SQLCloseable {
     private final String url;
+    private String schema;
     private final ConnectionQueryServices services;
     private final Properties info;
     private final Map<PDataType<?>, Format> formatters = new HashMap<>();
@@ -244,6 +247,10 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
         this.consistency = JDBCUtil.getConsistencyLevel(url, this.info, this.services.getProps()
                  .get(QueryServices.CONSISTENCY_ATTRIB,
                          QueryServicesOptions.DEFAULT_CONSISTENCY_LEVEL));
+        // currently we are not resolving the schema set through the property, so if the schema doesn't exist, the connection will not fail
+        // but queries may fail
+        this.schema = JDBCUtil.getSchema(url, this.info,
+                this.services.getProps().get(QueryServices.SCHEMA_ATTRIB, QueryServicesOptions.DEFAULT_SCHEMA));
         this.tenantId = tenantId;
         this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, this.services.getProps());
         datePattern = this.services.getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
@@ -855,14 +862,17 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
 
     @Override
     public void setSchema(String schema) throws SQLException {
-        // TODO Auto-generated method stub
-        
+        this.schema = schema;
+
     }
 
     @Override
     public String getSchema() throws SQLException {
-        // TODO Auto-generated method stub
-        return null;
+        return this.schema;
+    }
+
+    public PSchema getSchema(PTableKey key) throws SchemaNotFoundException {
+        return metaData.getSchema(key);
     }
 
     @Override
@@ -912,11 +922,25 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
     }
 
     @Override
-    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls, boolean isTransactional, long updateCacheFrequency, long resolvedTime)
-            throws SQLException {
-        metaData = metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows, isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, resolvedTime);
-        //Cascade through to connectionQueryServices too
-        getQueryServices().addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows, isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, resolvedTime);
+    public PMetaData addSchema(PSchema schema) throws SQLException {
+        metaData = metaData.addSchema(schema);
+        // Cascade through to connectionQueryServices too
+        getQueryServices().addSchema(schema);
+        return metaData;
+    }
+
+    @Override
+    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
+            long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls,
+            boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime)
+                    throws SQLException {
+        metaData = metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
+                isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, isNamespaceMapped,
+                resolvedTime);
+        // Cascade through to connectionQueryServices too
+        getQueryServices().addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
+                isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, isNamespaceMapped,
+                resolvedTime);
         return metaData;
     }
 
@@ -1040,4 +1064,13 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
         checkNotNull(factory);
         this.tableResultIteratorFactory = factory;
     }
+
+    @Override
+    public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
+        metaData = metaData.removeSchema(schema, schemaTimeStamp);
+        // Cascade through to connectionQueryServices too
+        getQueryServices().removeSchema(schema, schemaTimeStamp);
+        return metaData;
+
+    }
 }
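
PhoenixConnection now carries a default schema, settable either through java.sql.Connection#setSchema or a connection property read via QueryServices.SCHEMA_ATTRIB. A usage sketch, assuming the property key is the string "schema":

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    Properties props = new Properties();
    props.setProperty("schema", "MY_SCHEMA");   // assumed key for SCHEMA_ATTRIB
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
        conn.setSchema("OTHER_SCHEMA");         // overrides the property
        System.out.println(conn.getSchema());   // prints OTHER_SCHEMA
    }

As the added comment notes, the schema name is not validated at connect time; a nonexistent schema surfaces only when a query actually resolves against it.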

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 67814e8..2fffc84 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -52,6 +52,7 @@ import org.apache.phoenix.iterate.DelegateResultIterator;
 import org.apache.phoenix.iterate.MaterializedResultIterator;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable.LinkType;
@@ -100,6 +101,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final byte[] SYSTEM_CATALOG_NAME_BYTES = Bytes.toBytes(SYSTEM_CATALOG_NAME);
     public static final String SYSTEM_STATS_TABLE = "STATS";
     public static final String SYSTEM_STATS_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_STATS_TABLE);
+    public static final String IS_NAMESPACE_MAPPED = "IS_NAMESPACE_MAPPED";
+    public static final byte[] IS_NAMESPACE_MAPPED_BYTES = Bytes.toBytes(IS_NAMESPACE_MAPPED);
     public static final byte[] SYSTEM_STATS_NAME_BYTES = Bytes.toBytes(SYSTEM_STATS_NAME);
     public static final byte[] SYSTEM_STATS_TABLE_BYTES = Bytes.toBytes(SYSTEM_STATS_TABLE);
     public static final String SYSTEM_CATALOG_ALIAS = "\"SYSTEM.TABLE\"";
@@ -915,10 +918,10 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
                 TENANT_ID + " " + TABLE_CATALOG +
                 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
                 " where " + COLUMN_NAME + " is null");
-        this.addTenantIdFilter(buf, catalog);
         if (schemaPattern != null) {
             buf.append(" and " + TABLE_SCHEM + " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'");
         }
+        buf.append(" and " + TABLE_NAME + " = '" + MetaDataClient.EMPTY_TABLE + "'");
         Statement stmt = connection.createStatement();
         return stmt.executeQuery(buf.toString());
     }
@@ -1040,7 +1043,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
                 SQLIndexTypeFunction.NAME + "(" + INDEX_TYPE + ") AS " + INDEX_TYPE +
                 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
                 " where " + COLUMN_NAME + " is null" +
-                " and " + COLUMN_FAMILY + " is null");
+                " and " + COLUMN_FAMILY + " is null" +
+                " and " + TABLE_NAME + " != '" + MetaDataClient.EMPTY_TABLE + "'");
         addTenantIdFilter(buf, catalog);
         if (schemaPattern != null) {
             buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like '" + StringUtil.escapeStringConstant(schemaPattern) + "'" ));

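The getSchemas() rewrite above surfaces one row per schema, the SYSTEM.CATALOG rows whose TABLE_NAME equals MetaDataClient.EMPTY_TABLE. A hedged sketch of reading it through plain JDBC metadata calls (URL is a placeholder):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class ListSchemasExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                DatabaseMetaData md = conn.getMetaData();
                // null catalog and null pattern: list every schema row
                try (ResultSet rs = md.getSchemas(null, null)) {
                    while (rs.next()) {
                        System.out.println(rs.getString("TABLE_SCHEM"));
                    }
                }
            }
        }
    }
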
http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 880e758..5b799a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -51,6 +51,7 @@ import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.CreateFunctionCompiler;
 import org.apache.phoenix.compile.CreateIndexCompiler;
+import org.apache.phoenix.compile.CreateSchemaCompiler;
 import org.apache.phoenix.compile.CreateSequenceCompiler;
 import org.apache.phoenix.compile.CreateTableCompiler;
 import org.apache.phoenix.compile.DeleteCompiler;
@@ -92,6 +93,7 @@ import org.apache.phoenix.parse.ColumnDef;
 import org.apache.phoenix.parse.ColumnName;
 import org.apache.phoenix.parse.CreateFunctionStatement;
 import org.apache.phoenix.parse.CreateIndexStatement;
+import org.apache.phoenix.parse.CreateSchemaStatement;
 import org.apache.phoenix.parse.CreateSequenceStatement;
 import org.apache.phoenix.parse.CreateTableStatement;
 import org.apache.phoenix.parse.DeleteJarStatement;
@@ -99,6 +101,7 @@ import org.apache.phoenix.parse.DeleteStatement;
 import org.apache.phoenix.parse.DropColumnStatement;
 import org.apache.phoenix.parse.DropFunctionStatement;
 import org.apache.phoenix.parse.DropIndexStatement;
+import org.apache.phoenix.parse.DropSchemaStatement;
 import org.apache.phoenix.parse.DropSequenceStatement;
 import org.apache.phoenix.parse.DropTableStatement;
 import org.apache.phoenix.parse.ExplainStatement;
@@ -124,6 +127,7 @@ import org.apache.phoenix.parse.TraceStatement;
 import org.apache.phoenix.parse.UDFParseNode;
 import org.apache.phoenix.parse.UpdateStatisticsStatement;
 import org.apache.phoenix.parse.UpsertStatement;
+import org.apache.phoenix.parse.UseSchemaStatement;
 import org.apache.phoenix.query.HBaseFactoryProvider;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
@@ -620,6 +624,19 @@ public class PhoenixStatement implements Statement, SQLCloseable {
         }
     }
 
+    private static class ExecutableCreateSchemaStatement extends CreateSchemaStatement implements CompilableStatement {
+        ExecutableCreateSchemaStatement(String schemaName, boolean ifNotExists) {
+            super(schemaName, ifNotExists);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
+            CreateSchemaCompiler compiler = new CreateSchemaCompiler(stmt);
+            return compiler.compile(this);
+        }
+    }
+
     private static class ExecutableCreateFunctionStatement extends CreateFunctionStatement implements CompilableStatement {
 
         public ExecutableCreateFunctionStatement(PFunction functionInfo, boolean temporary, boolean isReplace) {
@@ -869,6 +886,58 @@ public class PhoenixStatement implements Statement, SQLCloseable {
         }
     }
 
+    private static class ExecutableDropSchemaStatement extends DropSchemaStatement implements CompilableStatement {
+
+        ExecutableDropSchemaStatement(String schemaName, boolean ifExists, boolean cascade) {
+            super(schemaName, ifExists, cascade);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
+            final StatementContext context = new StatementContext(stmt);
+            return new BaseMutationPlan(context, this.getOperation()) {
+
+                @Override
+                public ExplainPlan getExplainPlan() throws SQLException {
+                    return new ExplainPlan(Collections.singletonList("DROP SCHEMA"));
+                }
+
+                @Override
+                public MutationState execute() throws SQLException {
+                    MetaDataClient client = new MetaDataClient(getContext().getConnection());
+                    return client.dropSchema(ExecutableDropSchemaStatement.this);
+                }
+            };
+        }
+    }
+
+    private static class ExecutableUseSchemaStatement extends UseSchemaStatement implements CompilableStatement {
+
+        ExecutableUseSchemaStatement(String schemaName) {
+            super(schemaName);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
+            final StatementContext context = new StatementContext(stmt);
+            return new BaseMutationPlan(context, this.getOperation()) {
+
+                @Override
+                public ExplainPlan getExplainPlan() throws SQLException {
+                    return new ExplainPlan(Collections.singletonList("USE SCHEMA"));
+                }
+
+                @Override
+                public MutationState execute() throws SQLException {
+                    MetaDataClient client = new MetaDataClient(getContext().getConnection());
+                    return client.useSchema(ExecutableUseSchemaStatement.this);
+                }
+            };
+        }
+    }
+
     private static class ExecutableDropIndexStatement extends DropIndexStatement implements CompilableStatement {
 
         public ExecutableDropIndexStatement(NamedNode indexName, TableName tableName, boolean ifExists) {
@@ -880,7 +949,7 @@ public class PhoenixStatement implements Statement, SQLCloseable {
         public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
             final StatementContext context = new StatementContext(stmt);
             return new BaseMutationPlan(context, this.getOperation()) {
-                
+
                 @Override
                 public ExplainPlan getExplainPlan() throws SQLException {
                     return new ExplainPlan(Collections.singletonList("DROP INDEX"));
@@ -1080,7 +1149,12 @@ public class PhoenixStatement implements Statement, SQLCloseable {
                List<ParseNode> splits, PTableType tableType, boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount) {
             return new ExecutableCreateTableStatement(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, baseTableName, tableTypeIdNode, bindCount);
         }
-        
+
+        @Override
+        public CreateSchemaStatement createSchema(String schemaName, boolean ifNotExists) {
+            return new ExecutableCreateSchemaStatement(schemaName, ifNotExists);
+        }
+
         @Override
         public CreateSequenceStatement createSequence(TableName tableName, ParseNode startsWith,
                 ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue,
@@ -1137,6 +1211,16 @@ public class PhoenixStatement implements Statement, SQLCloseable {
         }
 
         @Override
+        public DropSchemaStatement dropSchema(String schemaName, boolean ifExists, boolean cascade) {
+            return new ExecutableDropSchemaStatement(schemaName, ifExists, cascade);
+        }
+
+        @Override
+        public UseSchemaStatement useSchema(String schemaName) {
+            return new ExecutableUseSchemaStatement(schemaName);
+        }
+
+        @Override
         public DropFunctionStatement dropFunction(String functionName, boolean ifExists) {
             return new ExecutableDropFunctionStatement(functionName, ifExists);
         }

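The three Executable*SchemaStatement classes above bind the new DDL to MetaDataClient. A sketch of driving them through a plain java.sql.Statement; the exact SQL spellings (IF NOT EXISTS, IF EXISTS) are assumptions inferred from the ifNotExists/ifExists/cascade flags, since the grammar change itself is not in these hunks:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class SchemaDdlExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE SCHEMA IF NOT EXISTS MY_SCHEMA"); // ExecutableCreateSchemaStatement
                stmt.execute("USE MY_SCHEMA");                         // ExecutableUseSchemaStatement
                stmt.execute("DROP SCHEMA IF EXISTS MY_SCHEMA");       // ExecutableDropSchemaStatement
            }
        }
    }
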
http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index ff73530..0525de9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
@@ -231,7 +232,8 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         }
 
         List<TargetTableRef> tablesToBeLoaded = new ArrayList<TargetTableRef>();
-        tablesToBeLoaded.add(new TargetTableRef(qualifiedTableName));
+        PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName);
+        tablesToBeLoaded.add(new TargetTableRef(qualifiedTableName, table.getPhysicalName().getString()));
         // using conn after it's been closed... o.O
         tablesToBeLoaded.addAll(getIndexTables(conn, schemaName, qualifiedTableName));
 
@@ -310,7 +312,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
             tableNames.add(table.getPhysicalName());
             LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
             String tableName = table.getPhysicalName();
-            Path tableOutputPath = new Path(outputPath,tableName);
+            Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputPath, tableName);
             HTable htable = new HTable(conf,tableName);
             LOG.info("Loading HFiles for {} from {}", tableName , tableOutputPath);
             loader.doBulkLoad(tableOutputPath, htable);
@@ -394,12 +396,11 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         List<TargetTableRef> indexTables = new ArrayList<TargetTableRef>();
         for(PTable indexTable : table.getIndexes()){
             if (indexTable.getIndexType() == PTable.IndexType.LOCAL) {
-                indexTables.add(new TargetTableRef(getQualifiedTableName(schemaName, indexTable
-                        .getTableName().getString()), MetaDataUtil
-                        .getLocalIndexTableName(qualifiedTableName)));
+                indexTables.add(new TargetTableRef(indexTable.getName().getString(),
+                        Bytes.toString(MetaDataUtil.getLocalIndexPhysicalName(table.getPhysicalName().getBytes()))));
             } else {
-                indexTables.add(new TargetTableRef(getQualifiedTableName(schemaName,
-                        indexTable.getTableName().getString())));
+                indexTables.add(
+                        new TargetTableRef(indexTable.getName().getString(), indexTable.getPhysicalName().getString()));
             }
         }
         return indexTables;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index bdc67f9..9289dbf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -18,8 +18,10 @@
 package org.apache.phoenix.mapreduce;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.query.QueryConstants;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -72,4 +74,9 @@ public class CsvBulkImportUtil {
         }
         return new String(Base64.decode(strValue)).charAt(0);
     }
+
+    public static Path getOutputPath(Path outputdir, String tableName) {
+        return new Path(outputdir,
+                tableName.replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR));
+    }
 }

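getOutputPath() keeps namespace-mapped table names from being parsed as nested HDFS directories. A small sketch, under the assumption that QueryConstants.NAMESPACE_SEPARATOR is ":" and NAME_SEPARATOR is "."; the paths and names are illustrative:

    import org.apache.hadoop.fs.Path;
    import org.apache.phoenix.mapreduce.CsvBulkImportUtil;

    public class OutputPathExample {
        public static void main(String[] args) {
            // "MY_SCHEMA:MY_TABLE" would otherwise be an invalid single path segment;
            // the helper rewrites it to a dot-separated directory name.
            Path out = CsvBulkImportUtil.getOutputPath(new Path("/tmp/bulkload"), "MY_SCHEMA:MY_TABLE");
            System.out.println(out); // expected: /tmp/bulkload/MY_SCHEMA.MY_TABLE
        }
    }
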
http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 7d79d64..35a2bd8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -160,7 +160,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
                 // If this is a new column family, verify that the directory exists
                 if (wl == null) {
                     // phoenix-2216: start : create a directory for table and family within the output dir 
-                    Path tableOutputPath = new Path(outputdir, tableName);
+                    Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputdir, tableName);
                     fs.mkdirs(new Path(tableOutputPath, Bytes.toString(family)));
                     // phoenix-2216: end
                 }
@@ -216,7 +216,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
               throws IOException {
 
               WriterLength wl = new WriterLength();
-              Path tableOutputPath = new Path(outputdir, tableName);
+              Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputdir, tableName);
               Path familydir = new Path(tableOutputPath, Bytes.toString(family));
 
               // phoenix-2216: start : fetching the configuration properties that were set to the table.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index f5117fc..34694c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -51,6 +52,7 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.phoenix.compile.PostIndexDDLCompiler;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.CsvBulkImportUtil;
 import org.apache.phoenix.mapreduce.util.ColumnInfoToStringEncoderDecoder;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
@@ -202,9 +204,10 @@ public class IndexTool extends Configured implements Tool {
 
             // check if the index type is LOCAL, if so, derive and set the physicalIndexName that is
             // computed from the qDataTable name.
-            String physicalIndexTable = qIndexTable;
+            String physicalIndexTable = pindexTable.getPhysicalName().getString();
             if (IndexType.LOCAL.equals(pindexTable.getIndexType())) {
-                physicalIndexTable = MetaDataUtil.getLocalIndexTableName(qDataTable);
+                physicalIndexTable = Bytes
+                        .toString(MetaDataUtil.getLocalIndexPhysicalName(pdataTable.getPhysicalName().getBytes()));
             }
 
             final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class);
@@ -226,8 +229,8 @@ public class IndexTool extends Configured implements Tool {
                     PhoenixRuntime.generateColumnInfo(connection, qIndexTable, indexColumns);
             ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList);
 
-            final Path outputPath =
-                    new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()), physicalIndexTable);
+            final Path outputPath = CsvBulkImportUtil
+                    .getOutputPath(new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt())), physicalIndexTable);
             FileSystem.get(configuration).delete(outputPath, true);
 
             final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, dataTable, indexTable);
@@ -280,9 +283,9 @@ public class IndexTool extends Configured implements Tool {
         job.setMapOutputKeyClass(ImmutableBytesWritable.class);
         job.setMapOutputValueClass(KeyValue.class);
         final Configuration configuration = job.getConfiguration();
-        final String logicalIndexTable =
+        final String physicalIndexTable =
                 PhoenixConfigurationUtil.getPhysicalTableName(configuration);
-        final HTable htable = new HTable(configuration, logicalIndexTable);
+        final HTable htable = new HTable(configuration, physicalIndexTable);
         HFileOutputFormat.configureIncrementalLoad(job, htable);
         boolean status = job.waitForCompletion(true);
         if (!status) {

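IndexTool now stages HFiles under the index's physical name, deriving the local-index physical table from the data table's physical name. A sketch; the table name below is made up, and the exact derived prefix is an implementation detail of MetaDataUtil, so only the call shape is shown:

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.util.MetaDataUtil;

    public class LocalIndexNameExample {
        public static void main(String[] args) {
            // Derive the local index's physical HBase table name from the
            // data table's physical name, mirroring the IndexTool change above.
            byte[] dataTable = Bytes.toBytes("MY_SCHEMA.DATA_TABLE");
            String physical = Bytes.toString(MetaDataUtil.getLocalIndexPhysicalName(dataTable));
            System.out.println(physical);
        }
    }
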
http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java
new file mode 100644
index 0000000..c186137
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java
@@ -0,0 +1,25 @@
+package org.apache.phoenix.parse;
+
+public class CreateSchemaStatement extends MutableStatement {
+    private final String schemaName;
+    private final boolean ifNotExists;
+
+    public CreateSchemaStatement(String schemaName, boolean ifNotExists) {
+        this.schemaName = schemaName;
+        this.ifNotExists = ifNotExists;
+    }
+
+    @Override
+    public int getBindCount() {
+        return 0;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+
+    public boolean isIfNotExists() {
+        return ifNotExists;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java
new file mode 100644
index 0000000..8e4642d
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java
@@ -0,0 +1,38 @@
+package org.apache.phoenix.parse;
+
+import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
+
+public class DropSchemaStatement extends MutableStatement {
+    private final String schemaName;
+    private final boolean ifExists;
+    private final boolean cascade;
+
+    public DropSchemaStatement(String schemaName, boolean ifExists, boolean cascade) {
+        this.schemaName = schemaName;
+        this.ifExists = ifExists;
+        this.cascade = cascade;
+    }
+
+    @Override
+    public int getBindCount() {
+        return 0;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+
+    public boolean ifExists() {
+        return ifExists;
+    }
+
+    public boolean cascade() {
+        return cascade;
+    }
+
+    @Override
+    public Operation getOperation() {
+        return Operation.DELETE;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/parse/PSchema.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/PSchema.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/PSchema.java
new file mode 100644
index 0000000..7a0ddc8
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/PSchema.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.coprocessor.generated.PSchemaProtos;
+import org.apache.phoenix.schema.PMetaDataEntity;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.SizedUtil;
+
+public class PSchema implements PMetaDataEntity {
+
+    private final PName schemaName;
+    private PTableKey schemaKey;
+    private long timeStamp;
+    private int estimatedSize;
+
+    public PSchema(long timeStamp) { // For index delete marker
+        this.timeStamp = timeStamp;
+        this.schemaName = null;
+    }
+
+    public PSchema(String schemaName) {
+        this(schemaName, HConstants.LATEST_TIMESTAMP);
+    }
+
+    public PSchema(String schemaName, long timeStamp) {
+        this.schemaName = PNameFactory.newName(SchemaUtil.normalizeIdentifier(schemaName));
+        this.schemaKey = new PTableKey(null, this.schemaName.getString());
+        this.timeStamp = timeStamp;
+        this.estimatedSize = SizedUtil.INT_SIZE + SizedUtil.LONG_SIZE + PNameFactory.getEstimatedSize(this.schemaName);
+    }
+
+    public PSchema(PSchema schema) {
+        this(schema.getSchemaName().toString(), schema.getTimeStamp());
+    }
+
+    public String getSchemaName() {
+        return schemaName == null ? null : schemaName.getString();
+    }
+
+    public PTableKey getSchemaKey() {
+        return schemaKey;
+    }
+
+    public long getTimeStamp() {
+        return timeStamp;
+    }
+
+    public static PSchemaProtos.PSchema toProto(PSchema schema) {
+        PSchemaProtos.PSchema.Builder builder = PSchemaProtos.PSchema.newBuilder();
+        builder.setSchemaName(schema.getSchemaName());
+        builder.setTimeStamp(schema.getTimeStamp());
+        return builder.build();
+    }
+
+    public static PSchema createFromProto(PSchemaProtos.PSchema schema) {
+        long timeStamp = schema.getTimeStamp();
+        String schemaName = schema.getSchemaName();
+        return new PSchema(schemaName, timeStamp);
+    }
+
+    public int getEstimatedSize() {
+        return estimatedSize;
+    }
+
+}
+

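PSchema serializes just the two required proto fields, schema name and timestamp. A round-trip sketch (the schema name is illustrative):

    import org.apache.phoenix.coprocessor.generated.PSchemaProtos;
    import org.apache.phoenix.parse.PSchema;

    public class PSchemaProtoExample {
        public static void main(String[] args) {
            PSchema schema = new PSchema("MY_SCHEMA"); // defaults to HConstants.LATEST_TIMESTAMP
            PSchemaProtos.PSchema proto = PSchema.toProto(schema);
            PSchema copy = PSchema.createFromProto(proto);
            // Both serialized fields survive the round trip.
            System.out.println(copy.getSchemaName() + " @ " + copy.getTimeStamp());
        }
    }
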
http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 4c6c1e6..024e2c7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -287,6 +287,10 @@ public class ParseNodeFactory {
         return new CreateTableStatement(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, baseTableName, tableTypeIdNode, bindCount);
     }
 
+    public CreateSchemaStatement createSchema(String schemaName, boolean ifNotExists) {
+        return new CreateSchemaStatement(schemaName, ifNotExists);
+    }
+
     public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, IndexKeyConstraint ikConstraint, List<ColumnName> includeColumns, List<ParseNode> splits, ListMultimap<String,Pair<String,Object>> props, boolean ifNotExists, IndexType indexType, boolean async, int bindCount, Map<String, UDFParseNode> udfParseNodes) {
         return new CreateIndexStatement(indexName, dataTable, ikConstraint, includeColumns, splits, props, ifNotExists, indexType, async, bindCount, udfParseNodes);
     }
@@ -836,4 +840,12 @@ public class ParseNodeFactory {
     public OffsetNode offset(LiteralParseNode l) {
         return new OffsetNode(l);
     }
+
+    public DropSchemaStatement dropSchema(String schemaName, boolean ifExists, boolean cascade) {
+        return new DropSchemaStatement(schemaName, ifExists, cascade);
+    }
+
+    public UseSchemaStatement useSchema(String schemaName) {
+        return new UseSchemaStatement(schemaName);
+    }
 }

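The factory methods above construct the plain (non-executable) statement nodes; PhoenixStatement's inner factory overrides them to return the Executable variants. A trivial sketch of the flag wiring (names illustrative):

    import org.apache.phoenix.parse.CreateSchemaStatement;
    import org.apache.phoenix.parse.DropSchemaStatement;
    import org.apache.phoenix.parse.ParseNodeFactory;
    import org.apache.phoenix.parse.UseSchemaStatement;

    public class SchemaStatementFactoryExample {
        public static void main(String[] args) {
            ParseNodeFactory factory = new ParseNodeFactory();
            CreateSchemaStatement create = factory.createSchema("MY_SCHEMA", true);  // ifNotExists
            DropSchemaStatement drop = factory.dropSchema("MY_SCHEMA", true, false); // ifExists, no cascade
            UseSchemaStatement use = factory.useSchema("MY_SCHEMA");
            // Schema DDL statements carry no bind parameters.
            System.out.println(create.getSchemaName() + " binds: " + create.getBindCount());
        }
    }
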
http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java
new file mode 100644
index 0000000..3c5d0f0
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java
@@ -0,0 +1,19 @@
+package org.apache.phoenix.parse;
+
+public class UseSchemaStatement extends MutableStatement {
+    private final String schemaName;
+
+    public UseSchemaStatement(String schemaName) {
+        this.schemaName = schemaName;
+    }
+
+    @Override
+    public int getBindCount() {
+        return 0;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/de9a2c7b/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
index 5df0492..0a69ae4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
@@ -32,14 +32,14 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.util.StringUtils;
-
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcController;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
-import org.apache.phoenix.coprocessor.generated.ServerCachingProtos;
 import org.apache.phoenix.coprocessor.generated.PTableProtos;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos;
 import org.apache.phoenix.schema.PTableType;
 
+import com.google.protobuf.ByteString;
+import com.google.protobuf.RpcController;
+
 public class ProtobufUtil {
 
     /**
@@ -71,6 +71,10 @@ public class ProtobufUtil {
         return getMutations(request.getTableMetadataMutationsList());
     }
 
+    public static List<Mutation> getMutations(MetaDataProtos.DropSchemaRequest request) throws IOException {
+        return getMutations(request.getSchemaMetadataMutationsList());
+    }
+
     public static List<Mutation> getMutations(MetaDataProtos.DropFunctionRequest request)
             throws IOException {
         return getMutations(request.getTableMetadataMutationsList());
@@ -101,6 +105,10 @@ public class ProtobufUtil {
         return getMutations(request.getTableMetadataMutationsList());
     }
 
+    public static List<Mutation> getMutations(MetaDataProtos.CreateSchemaRequest request) throws IOException {
+        return getMutations(request.getTableMetadataMutationsList());
+    }
+
     /**
      * Each ByteString entry is a byte array serialized from MutationProto 
instance
      * @param mutations
