Author: jbellis
Date: Thu Feb 17 15:08:24 2011
New Revision: 1071636

URL: http://svn.apache.org/viewvc?rev=1071636&view=rev
Log:
Handle whole-row deletions in CFOutputFormat
patch by Patrik Modesto; reviewed by jbellis for CASSANDRA-2014
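
For reference, a minimal client-side sketch of the case this patch handles: a whole-row deletion is a Mutation whose column_or_supercolumn is null and whose Deletion carries only a timestamp (no super_column, no predicate). The class and method names below (RowDeleteSketch, deleteRow) are illustrative only; the sketch assumes the Avro-generated Mutation/Deletion classes expose public fields and no-arg constructors, as the field accesses in the diff below suggest, and that reducers hand ColumnFamilyOutputFormat a ByteBuffer row key with a List<Mutation> value, per the 0.7 Hadoop integration.

    import org.apache.cassandra.avro.Deletion;
    import org.apache.cassandra.avro.Mutation;

    public class RowDeleteSketch
    {
        // Build an Avro Mutation that deletes an entire row: no
        // column_or_supercolumn, and a Deletion carrying only a timestamp
        // (super_column and predicate left null), which avroToThrift now
        // converts to a Thrift Deletion with no predicate, i.e. a
        // whole-row delete.
        public static Mutation deleteRow(long timestamp)
        {
            Deletion d = new Deletion();
            d.timestamp = timestamp;    // only the timestamp is set
            Mutation m = new Mutation();
            m.deletion = d;             // column_or_supercolumn stays null
            return m;
        }

        // A reducer writing to ColumnFamilyOutputFormat would then emit
        // something like:
        //   context.write(rowKey, Collections.singletonList(deleteRow(ts)));
    }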

Modified:
    cassandra/branches/cassandra-0.7/CHANGES.txt
    cassandra/branches/cassandra-0.7/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordWriter.java

Modified: cassandra/branches/cassandra-0.7/CHANGES.txt
URL: http://svn.apache.org/viewvc/cassandra/branches/cassandra-0.7/CHANGES.txt?rev=1071636&r1=1071635&r2=1071636&view=diff
==============================================================================
--- cassandra/branches/cassandra-0.7/CHANGES.txt (original)
+++ cassandra/branches/cassandra-0.7/CHANGES.txt Thu Feb 17 15:08:24 2011
@@ -3,8 +3,10 @@
  * lower-latency read repair (CASSANDRA-2069)
  * add hinted_handoff_throttle_delay_in_ms option (CASSANDRA-2161)
  * fixes for cache save/load (CASSANDRA-2172, -2174)
+ * Handle whole-row deletions in CFOutputFormat (CASSANDRA-2014)
  * Make memtable_flush_writers flush in parallel (CASSANDRA-2178)
 
+
 0.7.2
  * copy DecoratedKey.key when inserting into caches to avoid retaining
    a reference to the underlying buffer (CASSANDRA-2102)

Modified: cassandra/branches/cassandra-0.7/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordWriter.java
URL: http://svn.apache.org/viewvc/cassandra/branches/cassandra-0.7/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordWriter.java?rev=1071636&r1=1071635&r2=1071636&view=diff
==============================================================================
--- cassandra/branches/cassandra-0.7/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordWriter.java (original)
+++ cassandra/branches/cassandra-0.7/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordWriter.java Thu Feb 17 15:08:24 2011
@@ -143,33 +143,23 @@ implements org.apache.hadoop.mapred.Reco
     {
         Mutation mutation = new Mutation();
        org.apache.cassandra.avro.ColumnOrSuperColumn acosc = amut.column_or_supercolumn;
-        if (acosc != null)
-        {
-            // creation
-            ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
-            mutation.setColumn_or_supercolumn(cosc);
-            if (acosc.column != null)
-                // standard column
-                cosc.setColumn(avroToThrift(acosc.column));
-            else
-            {
-                // super column
-                ByteBuffer scolname = acosc.super_column.name;
-                List<Column> scolcols = new ArrayList<Column>(acosc.super_column.columns.size());
-                for (org.apache.cassandra.avro.Column acol : acosc.super_column.columns)
-                    scolcols.add(avroToThrift(acol));
-                cosc.setSuper_column(new SuperColumn(scolname, scolcols));
-            }
-        }
-        else
+        if (acosc == null)
         {
             // deletion
+            assert amut.deletion != null;
             Deletion deletion = new Deletion(amut.deletion.timestamp);
             mutation.setDeletion(deletion);
+
             org.apache.cassandra.avro.SlicePredicate apred = amut.deletion.predicate;
-            if (amut.deletion.super_column != null)
+            if (apred == null && amut.deletion.super_column == null)
+            {
+                // leave Deletion alone to delete entire row
+            }
+            else if (amut.deletion.super_column != null)
+            {
                 // super column
                 deletion.setSuper_column(ByteBufferUtil.getArray(amut.deletion.super_column));
+            }
             else if (apred.column_names != null)
             {
                 // column names
@@ -184,6 +174,24 @@ implements org.apache.hadoop.mapred.Reco
                 deletion.setPredicate(new SlicePredicate().setSlice_range(avroToThrift(apred.slice_range)));
             }
         }
+        else
+        {
+            // creation
+            ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
+            mutation.setColumn_or_supercolumn(cosc);
+            if (acosc.column != null)
+                // standard column
+                cosc.setColumn(avroToThrift(acosc.column));
+            else
+            {
+                // super column
+                ByteBuffer scolname = acosc.super_column.name;
+                List<Column> scolcols = new ArrayList<Column>(acosc.super_column.columns.size());
+                for (org.apache.cassandra.avro.Column acol : acosc.super_column.columns)
+                    scolcols.add(avroToThrift(acol));
+                cosc.setSuper_column(new SuperColumn(scolname, scolcols));
+            }
+        }
         return mutation;
     }
 

