Here is the updated code.

Naama
/**
* <pre>
* 'Toy tables' for experimenting with MapReduce over HBase
*
* grades table - an HBase table of the form -
* row id is a student name
* column name is Course:course_name
* cell value is the student's grade in the course 'course_name'
*
* Example:
*
* Course:Math | Course:Art | Course:Sports
* ----------------------------------------------------------------
* Dan 87 97 99
* Dana 100 100 80
*
* =======================================
*
* courses table - an HBase table of the form -
* row id is a course name
* column name is Stats:Average
* cell value is the average grade in that course, computed by a map reduce
job
*
* Example:
*
* Stats:Average
* --------------
* Art 86
* Math 77
* </pre>
* @see GradesTableMapReduce
*
*
*/
public class GradesTable {
    /** Supported table operations. */
    enum OP { CREATE, DELETE, DUMP }

    public static final String GRADES_TABLE_NAME = "grades";
    public static final String COURSE_TABLE_NAME = "courses";
    /** Column family holding one column per course. */
    public static final String COURSE_FAMILY = "Course:";
    /** Column family holding grades statistics. */
    public static final String STATS_FAMILY = "Stats:";
    /** Column member holding the average grade in a course. */
    public static final String AVG = "Average";

    private static final String[] STUDENT_NAMES = {
        "Dan", "Dana", "Sara", "David"
    };
    private static final String[] COURSE_NAMES = {
        "Math", "Art", "Sports"
    };

    private final HBaseConfiguration conf;
    private final HBaseAdmin admin;
    private final HTableDescriptor grades_desc;
    private final HTableDescriptor courses_desc;
    // Used to randomly generate grades.
    private final Random rand;

    /**
     * Set up the HBase configuration, admin handle, and table descriptors.
     *
     * @throws IOException if the HBase admin connection cannot be created
     */
    public GradesTable() throws IOException {
        conf = new HBaseConfiguration();
        admin = new HBaseAdmin(conf);
        grades_desc = new HTableDescriptor(GRADES_TABLE_NAME);
        courses_desc = new HTableDescriptor(COURSE_TABLE_NAME);
        rand = new Random();
    }

    /**
     * Create the grades and courses tables and populate the grades table
     * with a random grade per student per course.
     *
     * @throws IOException if table creation or population fails
     */
    public void create() throws IOException {
        grades_desc.addFamily(new HColumnDescriptor(COURSE_FAMILY));
        courses_desc.addFamily(new HColumnDescriptor(STATS_FAMILY));
        admin.createTable(grades_desc);
        admin.createTable(courses_desc);
        System.out.println("Tables created");
        // Populate grades table with students and their grades in courses.
        HTable table = new HTable(conf, new Text(GRADES_TABLE_NAME));
        for (int i = 0; i < STUDENT_NAMES.length; i++) {
            System.out.println("<<< Row " + i + ", student: " + STUDENT_NAMES[i] +
                " >>>");
            Text stuName = new Text(STUDENT_NAMES[i]);
            // Start an update transaction; the student name is the row id.
            long writeid = table.startUpdate(stuName);
            for (int j = 0; j < COURSE_NAMES.length; j++) {
                Text courseColumn = new Text(COURSE_FAMILY + COURSE_NAMES[j]);
                // FIX: the original Math.abs(rand.nextInt()) % 101 is biased
                // and yields a negative grade when nextInt() returns
                // Integer.MIN_VALUE (Math.abs(MIN_VALUE) is still negative).
                // nextInt(bound) is uniform over [0, 101).
                int grade = rand.nextInt(101);
                table.put(writeid, courseColumn, new IntWritable(grade));
                System.out.println("Course: " + COURSE_NAMES[j] + ", grade: " +
                    grade);
            }
            table.commit(writeid);
        }
        System.out.println("Grades Table populated");
    }
}
====================================================
/**
* A map reduce job over {@link GradesTable}.
* The job produces for each course the average grade in that course.
* It puts the average in a separate table which holds course statistics.
*
*/
public class GradesTableMapReduce extends Configured implements Tool {
    /**
     * Map a row to {key, value} pairs.
     * Emits a {course, grade} pair for each course grade appearing in the
     * student row.
     * E.g. Sara {Math:62, Art:45, Sports:87} -> {Math, 62}, {Art, 45},
     * {Sports, 87}
     */
    public static class GradesTableMap extends TableMap<Text, IntWritable> {
        @Override
        public void map(HStoreKey key, MapWritable value,
            OutputCollector<Text, IntWritable> output, Reporter reporter)
            throws IOException {
            // Each entry is one column: key "Course:<name>", value the
            // serialized grade bytes.
            for (Map.Entry<Writable, Writable> e : value.entrySet()) {
                // Strip the family prefix so the emitted key is the bare
                // course name.
                String courseStr = e.getKey().toString();
                Text course =
                    new Text(courseStr.substring(courseStr.indexOf(':') + 1));
                // Cell bytes are a serialized IntWritable; decode it.
                byte[] gradeInBytes =
                    ((ImmutableBytesWritable) e.getValue()).get();
                DataInputStream in = new DataInputStream(
                    new ByteArrayInputStream(gradeInBytes));
                IntWritable grade = new IntWritable();
                grade.readFields(in);
                // Emit course name and a grade.
                output.collect(course, grade);
            }
        }
    }

    /**
     * Reduce - compute the average of the key's values, i.e. the average
     * grade in each course.
     * E.g. {Math, {62, 45, 87}} -> {Math, 64.67}
     */
    public static class GradesTableReduce extends TableReduce<Text,
        IntWritable> {
        // key is a course name, values are the grades in the course.
        @Override
        public void reduce(Text key, Iterator<IntWritable> values,
            OutputCollector<Text, MapWritable> output, Reporter reporter)
            throws IOException {
            // Compute the grades average. Use a long accumulator so many
            // large grades cannot overflow an int.
            int total = 0;
            long sum = 0;
            while (values.hasNext()) {
                total++;
                sum += values.next().get();
            }
            // The framework should never invoke reduce with an empty
            // iterator, but guard against division by zero anyway.
            if (total == 0) {
                return;
            }
            // FIX: the original computed `sum / total` with integer
            // operands, truncating the average before the float assignment.
            // Cast first so the fractional part is kept.
            float average = (float) sum / total;
            // Serialize the average and store it as the Stats:Average
            // column of the courses table.
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(baos);
            FloatWritable avgWritable = new FloatWritable(average);
            avgWritable.write(out);
            MapWritable map = new MapWritable();
            map.put(new Text(GradesTable.STATS_FAMILY + GradesTable.AVG),
                new ImmutableBytesWritable(baos.toByteArray()));
            output.collect(key, map);
        }
    }

    /**
     * Configure and run the average-grades job.
     *
     * @param args unused command-line arguments
     * @return 0 on success
     * @throws Exception if the job fails
     */
    public int run(String[] args) throws Exception {
        JobConf jobConf = new JobConf();
        jobConf.setJobName("compute average grades");
        jobConf.setNumReduceTasks(1);
        // All columns in the course family (i.e. all grades) feed the map.
        TableMap.initJob(GradesTable.GRADES_TABLE_NAME,
            GradesTable.COURSE_FAMILY,
            GradesTableMap.class, jobConf);
        // Reduce output (course average grade) is put in the courses table.
        TableReduce.initJob(GradesTable.COURSE_TABLE_NAME,
            GradesTableReduce.class, jobConf);
        // Map produces a value which is an IntWritable.
        jobConf.setMapOutputValueClass(IntWritable.class);
        JobClient.runJob(jobConf);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        // FIX: propagate the tool's status as the process exit code, per
        // the standard ToolRunner idiom (the original discarded it).
        int res =
            ToolRunner.run(new Configuration(), new GradesTableMapReduce(), args);
        System.exit(res);
    }
}
On Tue, Jun 24, 2008 at 5:57 PM, stack <[EMAIL PROTECTED]> wrote:
> Naama Kraus wrote:
>
>> ..
>> What if the mission was the following - for each course in the table,
>> calculate the average grade in that course. In that case both map and
>> reduce
>> are required, is that correct ? Map will emit for each row a {course_name,
>> grade} pair. Reduce will emit the average grades for each course
>> (course_name, avg_grade}. Output can be put in a separate table (probably
>> one holding courses information). Does this make sense ?
>>
>>
>>
>>
> That'll work.
>
> * At a higher level, I'd suggest a refactoring. Do all of your work in
>>> the map phase. Have no reduce phase. I suggest this because as is, all
>>> rows emitted by the map are being sorted by the MR framework. But hbase
>>> will also do a sort on insert. Avoid paying the prices of the MR sort.
>>> Do
>>> your calculation in the map and then insert the result at map time.
>>> Either
>>> emit nothing or, emit a '1' for every row processed so the MR counters
>>> tell
>>> a story about your MR job.*
>>>
>>>
>>>
>>
>> That's an interesting point. So if both map and reduce are required,
>> then two sorts must take place. Is that correct?
>>
>>
>>
> Yes but with your new example, they are orthogonal toward different ends;
> the first does collecting together all course data and the second orders
> courses in hbase lexicographically (presuming course is primary key).
>
> St.Ack
>
--
oo 00 oo 00 oo 00 oo 00 oo 00 oo 00 oo 00 oo 00 oo 00 oo 00 oo 00 oo 00 oo
00 oo 00 oo
"If you want your children to be intelligent, read them fairy tales. If you
want them to be more intelligent, read them more fairy tales." (Albert
Einstein)