Hi,

I have implemented a custom TableInputFormat. I call it
TableInputFormatMapPerRow, and that is exactly what it does: its
getSplits() creates a TableSplit for each row in the HBase table.
But when I actually run an application with my custom TableInputFormat,
fewer map tasks are created than there should be.
I really don't know what I am doing wrong.
Any suggestions, please?
Below is my TableInputFormatMapPerRow.java.

Ed

----------------------------------------------------------------------------------------------

/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.StringUtils;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Convert HBase tabular data into a format that is consumable by Map/Reduce.
 */
public class TableInputFormatMapPerRow
    extends InputFormat<ImmutableBytesWritable, Result>
    implements Configurable {

  private final Log LOG = LogFactory.getLog(TableInputFormatMapPerRow.class);

  /** Job parameter that specifies the input table. */
  public static final String INPUT_TABLE = "hbase.mapreduce.inputtable";
  /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified.
   * See {@link TableMapReduceUtil#convertScanToString(Scan)} for more details.
   */
  public static final String SCAN = "hbase.mapreduce.scan";
  /** Column family to scan. */
  public static final String SCAN_COLUMN_FAMILY = "hbase.mapreduce.scan.column.family";
  /** Space delimited list of columns to scan. */
  public static final String SCAN_COLUMNS = "hbase.mapreduce.scan.columns";
  /** The timestamp used to filter columns with a specific timestamp. */
  public static final String SCAN_TIMESTAMP = "hbase.mapreduce.scan.timestamp";
  /** The starting timestamp used to filter columns with a specific range of versions. */
  public static final String SCAN_TIMERANGE_START = "hbase.mapreduce.scan.timerange.start";
  /** The ending timestamp used to filter columns with a specific range of versions. */
  public static final String SCAN_TIMERANGE_END = "hbase.mapreduce.scan.timerange.end";
  /** The maximum number of versions to return. */
  public static final String SCAN_MAXVERSIONS = "hbase.mapreduce.scan.maxversions";
  /** Set to false to disable server-side caching of blocks for this scan. */
  public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks";
  /** The number of rows for caching that will be passed to scanners. */
  public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows";

  /** The configuration. */
  private Configuration conf = null;
  private HTable table = null;
  private Scan scan = null;
  private TableRecordReader tableRecordReader = null;

  /**
   * Returns the current configuration.
   *
   * @return The current configuration.
   * @see org.apache.hadoop.conf.Configurable#getConf()
   */
  @Override
  public Configuration getConf() {
    return conf;
  }

  /**
   * Sets the configuration. This is used to set the details for the table to
   * be scanned.
   *
   * @param configuration  The configuration to set.
   * @see org.apache.hadoop.conf.Configurable#setConf(
   *   org.apache.hadoop.conf.Configuration)
   */
  @Override
  public void setConf(Configuration configuration) {
    this.conf = configuration;
    String tableName = conf.get(INPUT_TABLE);
    try {
      this.table = new HTable(new Configuration(conf), tableName);
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
    }

    Scan scan = null;

    if (conf.get(SCAN) != null) {
      try {
        scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN));
      } catch (IOException e) {
        LOG.error("An error occurred.", e);
      }
    } else {
      try {
        scan = new Scan();

        if (conf.get(SCAN_COLUMNS) != null) {
          scan.addColumns(conf.get(SCAN_COLUMNS));
        }

        if (conf.get(SCAN_COLUMN_FAMILY) != null) {
          scan.addFamily(Bytes.toBytes(conf.get(SCAN_COLUMN_FAMILY)));
        }

        if (conf.get(SCAN_TIMESTAMP) != null) {
          scan.setTimeStamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
        }
        if (conf.get(SCAN_TIMERANGE_START) != null &&
            conf.get(SCAN_TIMERANGE_END) != null) {
          scan.setTimeRange(
              Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
              Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
        }

        if (conf.get(SCAN_MAXVERSIONS) != null) {
          scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
        }

        if (conf.get(SCAN_CACHEDROWS) != null) {
          scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
        }

        // false by default, full table scans generate too much BC churn
        scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
      } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
      }
    }

    this.scan = scan;
  }

  /**
   * Builds a TableRecordReader. If no TableRecordReader was provided, uses
   * the default.
   *
   * @param split  The split to work with.
   * @param context  The current context.
   * @return The newly created record reader.
   * @throws IOException When creating the reader fails.
   * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader(
   *   org.apache.hadoop.mapreduce.InputSplit,
   *   org.apache.hadoop.mapreduce.TaskAttemptContext)
   */
  @Override
  public RecordReader<ImmutableBytesWritable, Result> createRecordReader(
      InputSplit split, TaskAttemptContext context)
  throws IOException {
    if (table == null) {
      throw new IOException("Cannot create a record reader because of a" +
          " previous error. Please look at the previous logs lines from" +
          " the task's full log for more details.");
    }
    TableSplit tSplit = (TableSplit) split;
    TableRecordReader trr = this.tableRecordReader;
    // if no table record reader was provided use default
    if (trr == null) {
      trr = new TableRecordReader();
    }
    Scan sc = new Scan(this.scan);
    sc.setStartRow(tSplit.getStartRow());
    sc.setStopRow(tSplit.getEndRow());
    trr.setScan(sc);
    trr.setHTable(table);
    trr.init();
    return trr;
  }

  /**
   * Splits the source table by row: one TableSplit (and therefore one map
   * task) is created for every row in the table.
   */
  @Override
  public List<InputSplit> getSplits(JobContext context) throws IOException {
    if (table == null) {
      throw new IOException("No table was provided.");
    }

    List<InputSplit> splitList = new ArrayList<InputSplit>();
    int count = 0;
    // Only the row keys are needed to build splits, not the values.
    scan.setFilter(new KeyOnlyFilter());
    ResultScanner rscanner = table.getScanner(scan);
    for (Result rs : rscanner) {
      byte[] rowId = rs.getRow();
      String regionLocation =
          table.getRegionLocation(rowId).getServerAddress().getHostname();
      InputSplit split =
          new TableSplit(table.getTableName(), rowId, rowId, regionLocation);
      splitList.add(split);
      if (LOG.isDebugEnabled()) {
        LOG.debug("getSplits: split -> " + (count++) + " -> " + split);
      }
    }
    rscanner.close();
    return splitList;
  }
}
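
For reference, this is roughly how such an input format gets plugged into a job. This is only a minimal sketch, not the actual driver; MapPerRowDriver, MyMapper, and the table name "mytable" are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatMapPerRow;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class MapPerRowDriver {

  /** Placeholder mapper: just prints the row key of every row it receives. */
  static class MyMapper extends TableMapper<NullWritable, NullWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) {
      System.out.println("map called for row: " + Bytes.toString(key.get()));
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = new Job(conf, "map-per-row");
    job.setJarByClass(MapPerRowDriver.class);

    // initTableMapperJob sets the stock TableInputFormat; it is then
    // overridden with the custom one so its getSplits() is used.
    TableMapReduceUtil.initTableMapperJob(
        "mytable",            // placeholder table name
        new Scan(),
        MyMapper.class,
        NullWritable.class,
        NullWritable.class,
        job);
    job.setInputFormatClass(TableInputFormatMapPerRow.class);

    // Map-only job; output is discarded.
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

With a setup like that, the number of map tasks should match the number of splits returned by getSplits(), i.e. one per row.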
