mapreduce.map.memory.mb=1638 mapreduce.map.java.opts=-Djava.net.preferIPv4Stack=true -Xmx1395864371 mapreduce.reduce.memory.mb=2458 mapreduce.reduce.java.opts=-Djava.net.preferIPv4Stack=true -Xmx2147483648 mapreduce.reduce.shuffle.input.buffer.percent=0.70
Apache Pig version 0.12.0-cdh5.2.0 2014-10-27 13:31 GMT+08:00 Jack Alexandria <panzhen.j...@gmail.com>: > hi! > I config: > mapreduce.map.memory.mb=1638 > > > 2014-10-27 08:46:16,824 WARN [main] org.apache.hadoop.conf.Configuration: > job.xml:an attempt to override final parameter: > mapreduce.job.end-notification.max.retry.interval; Ignoring. > 2014-10-27 08:46:16,877 WARN [main] org.apache.hadoop.conf.Configuration: > job.xml:an attempt to override final parameter: > mapreduce.job.end-notification.max.attempts; Ignoring. > 2014-10-27 08:46:17,199 INFO [main] > org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from > hadoop-metrics2.properties > 2014-10-27 08:46:17,282 INFO [main] > org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot > period at 10 second(s). > 2014-10-27 08:46:17,282 INFO [main] > org.apache.hadoop.metrics2.impl.MetricsSystemImpl: MapTask metrics system > started > 2014-10-27 08:46:17,301 INFO [main] org.apache.hadoop.mapred.YarnChild: > Executing with tokens: > 2014-10-27 08:46:17,301 INFO [main] org.apache.hadoop.mapred.YarnChild: > Kind: YARN_AM_RM_TOKEN, Service: 172.16.40.102:8030, Ident: > (org.apache.hadoop.yarn.security.AMRMTokenIdentifier@41a68961) > 2014-10-27 08:46:17,449 INFO [main] org.apache.hadoop.mapred.YarnChild: > Kind: mapreduce.job, Service: job_1414142946093_8612, Ident: > (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier@31e55839) > 2014-10-27 08:46:17,450 INFO [main] org.apache.hadoop.mapred.YarnChild: > Kind: RM_DELEGATION_TOKEN, Service: 172.16.40.102:8032, Ident: > (owner=cloudil, renewer=oozie mr token, realUser=oozie, > issueDate=1414370700648, maxDate=1414975500648, sequenceNumber=9048, > masterKeyId=4) > 2014-10-27 08:46:17,502 INFO [main] org.apache.hadoop.mapred.YarnChild: > Sleeping for 0ms before retrying again. Got null now. 
> 2014-10-27 08:46:17,789 INFO [main] org.apache.hadoop.mapred.YarnChild: > mapreduce.cluster.local.dir for child: > /rawdata01/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612,/rawdata02/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612,/rawdata03/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612,/rawdata04/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612,/xdrdata01/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612,/xdrdata02/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612,/xdrdata03/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612,/xdrdata04/yarn/nm/usercache/cloudil/appcache/application_1414142946093_8612 > 2014-10-27 08:46:17,891 WARN [main] org.apache.hadoop.conf.Configuration: > job.xml:an attempt to override final parameter: > mapreduce.job.end-notification.max.retry.interval; Ignoring. > 2014-10-27 08:46:17,915 WARN [main] org.apache.hadoop.conf.Configuration: > job.xml:an attempt to override final parameter: > mapreduce.job.end-notification.max.attempts; Ignoring. > 2014-10-27 08:46:18,370 INFO [main] > org.apache.hadoop.conf.Configuration.deprecation: session.id is > deprecated. Instead, use dfs.metrics.session-id > 2014-10-27 08:46:19,427 INFO [main] > org.apache.hadoop.conf.Configuration.deprecation: > mapred.textoutputformat.separator is deprecated. 
Instead, use > mapreduce.output.textoutputformat.separator > 2014-10-27 08:46:19,519 INFO [main] org.apache.hadoop.mapred.Task: Using > ResourceCalculatorProcessTree : [ ] > 2014-10-27 08:46:19,870 INFO [main] org.apache.hadoop.mapred.MapTask: > Processing split: Number of splits :1 > Total Length = 143988376 > Input split[0]: > Length = 143988376 > Locations: > > ----------------------- > > 2014-10-27 08:46:19,899 INFO [main] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigRecordReader: > Current split being processed > hdfs://new-nameservice/csv-pig/gbiupsdns/startdate=2014102707/201410270740-gbiupsdns-pssig102-68902910.DAT:268435456+143988376 > 2014-10-27 08:46:20,100 INFO [main] org.apache.hadoop.mapred.MapTask: > (EQUATOR) 0 kvi 134217724(536870896) > 2014-10-27 08:46:20,100 INFO [main] org.apache.hadoop.mapred.MapTask: > mapreduce.task.io.sort.mb: 512 > 2014-10-27 08:46:20,100 INFO [main] org.apache.hadoop.mapred.MapTask: soft > limit at 429496736 > 2014-10-27 08:46:20,100 INFO [main] org.apache.hadoop.mapred.MapTask: > bufstart = 0; bufvoid = 536870912 > 2014-10-27 08:46:20,100 INFO [main] org.apache.hadoop.mapred.MapTask: > kvstart = 134217724; length = 33554432 > 2014-10-27 08:46:20,122 INFO [main] org.apache.hadoop.mapred.MapTask: Map > output collector class = org.apache.hadoop.mapred.MapTask$MapOutputBuffer > 2014-10-27 08:46:20,256 INFO [main] > org.apache.pig.data.SchemaTupleBackend: Key [pig.schematuple] was not > set... will not generate code. 
> 2014-10-27 08:46:21,106 INFO [main] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigGenericMapReduce$Map: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:46:21,110 INFO [main] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigRecordReader: > Created input record counter: Input records from _1_startdate=2014102707 > 2014-10-27 08:48:59,704 INFO [main] org.apache.hadoop.mapred.MapTask: > Spilling map output > 2014-10-27 08:48:59,704 INFO [main] org.apache.hadoop.mapred.MapTask: > bufstart = 0; bufend = 402331648; bufvoid = 536870912 > 2014-10-27 08:48:59,704 INFO [main] org.apache.hadoop.mapred.MapTask: > kvstart = 134217724(536870896); kvend = 127426452(509705808); length = > 6791273/33554432 > 2014-10-27 08:48:59,704 INFO [main] org.apache.hadoop.mapred.MapTask: > (EQUATOR) 409149040 kvi 102287256(409149024) > 2014-10-27 08:49:03,421 INFO [SpillThread] > 
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:08,209 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > 
result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:08,531 INFO [Service Thread] > org.apache.pig.impl.util.SpillableMemoryManager: first memory handler call- > Usage threshold init = 930611200(908800K) used = 658047360(642624K) > committed = 930611200(908800K) max = 931135488(909312K) > 2014-10-27 08:49:13,128 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:14,617 INFO 
[Service Thread] > org.apache.pig.impl.util.SpillableMemoryManager: first memory handler call > - Collection threshold init = 930611200(908800K) used = 607517568(593278K) > committed = 931135488(909312K) max = 931135488(909312K) > 2014-10-27 08:49:17,579 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:25,038 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > 
load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:28,230 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > 
result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:31,571 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:35,540 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > 
load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:40,871 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > 
result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:44,335 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase (AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:49:56,430 INFO [main] org.apache.hadoop.mapred.MapTask: > Starting flush of map output > 2014-10-27 08:49:59,529 INFO [SpillThread] > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine: > Aliases being processed per job phase 
(AliasName[line,offset]): M: > load_gbiups_dns[28,19],load_gbiups_dns[-1,-1],gbiups_dns[83,13],gbiups_dns[-1,-1],C[139,4],result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30],load_gbiups_http[38,20],load_gbiups_http[-1,-1],gbiups_http[24,14],gbiups_http[-1,-1] > C: > result[144,9],group_result[142,15],result_apptypewhole[214,22],group_result_apptypewhole[212,28],result_noapptype[283,19],group_result_noapptype[281,25],result_city[352,14],group_result_city[350,20],result_city_apptypewhole[421,27],group_result_city_apptypewhole[419,33],result_city_noapptype[490,24],group_result_city_noapptype[488,30] > R: > result[144,9],result_apptypewhole[214,22],result_noapptype[283,19],result_city[352,14],result_city_apptypewhole[421,27],result_city_noapptype[490,24] > 2014-10-27 08:52:28,501 INFO [communication thread] > org.apache.hadoop.mapred.Task: Communication exception: > java.lang.OutOfMemoryError: GC overhead limit exceeded > at java.nio.HeapByteBuffer.<init>(HeapByteBuffer.java:57) > at java.nio.ByteBuffer.allocate(ByteBuffer.java:331) > at sun.nio.cs.StreamDecoder.<init>(StreamDecoder.java:250) > at sun.nio.cs.StreamDecoder.<init>(StreamDecoder.java:230) > at sun.nio.cs.StreamDecoder.forInputStreamReader(StreamDecoder.java:69) > at java.io.InputStreamReader.<init>(InputStreamReader.java:74) > at java.io.FileReader.<init>(FileReader.java:72) > at > org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.constructProcessInfo(ProcfsBasedProcessTree.java:495) > at > org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.updateProcessTree(ProcfsBasedProcessTree.java:214) > at org.apache.hadoop.mapred.Task.updateResourceCounters(Task.java:845) > at 
org.apache.hadoop.mapred.Task.updateCounters(Task.java:984) > at org.apache.hadoop.mapred.Task.access$500(Task.java:78) > at org.apache.hadoop.mapred.Task$TaskReporter.run(Task.java:733) > at java.lang.Thread.run(Thread.java:745) > > 2014-10-27 08:52:34,664 FATAL [main] org.apache.hadoop.mapred.Task: Task > attempt_1414142946093_8612_m_000005_0 failed : java.lang.OutOfMemoryError: > GC overhead limit exceeded > at org.apache.pig.data.DefaultTuple.<init>(DefaultTuple.java:69) > at org.apache.pig.data.BinSedesTuple.<init>(BinSedesTuple.java:66) > at > org.apache.pig.data.BinSedesTupleFactory.newTuple(BinSedesTupleFactory.java:38) > at > org.apache.pig.data.utils.SedesHelper.readGenericTuple(SedesHelper.java:142) > at org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:349) > at org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:318) > at > org.apache.pig.data.InternalCachedBag$CachedBagIterator.hasNext(InternalCachedBag.java:208) > at > org.apache.pig.builtin.AlgebraicLongMathBase.doTupleWork(AlgebraicLongMathBase.java:72) > at > org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:108) > at > org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:102) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNext(POUserFunc.java:330) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNextTuple(POUserFunc.java:369) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.getNext(PhysicalOperator.java:333) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.processPlan(POForEach.java:378) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.getNextTuple(POForEach.java:298) > at > 
org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.processInput(PhysicalOperator.java:281) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLocalRearrange.getNextTuple(POLocalRearrange.java:263) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.runPipeline(PODemux.java:220) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.getNextTuple(PODemux.java:210) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.processOnePackageOutput(PigCombiner.java:183) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:161) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:51) > at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171) > at org.apache.hadoop.mapred.Task$NewCombinerRunner.combine(Task.java:1651) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.sortAndSpill(MapTask.java:1632) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.access$900(MapTask.java:873) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer$SpillThread.run(MapTask.java:1525) > > 2014-10-27 08:52:34,890 INFO [main] org.apache.hadoop.mapred.MapTask: > Starting flush of map output > 2014-10-27 08:52:34,890 FATAL [main] org.apache.hadoop.mapred.Task: Task > attempt_1414142946093_8612_m_000005_0 failed : java.lang.OutOfMemoryError: > GC overhead limit exceeded > at org.apache.pig.data.DefaultTuple.<init>(DefaultTuple.java:69) > at org.apache.pig.data.BinSedesTuple.<init>(BinSedesTuple.java:66) > at > org.apache.pig.data.BinSedesTupleFactory.newTuple(BinSedesTupleFactory.java:38) > at > org.apache.pig.data.utils.SedesHelper.readGenericTuple(SedesHelper.java:142) > at org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:349) > at 
org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:318) > at > org.apache.pig.data.InternalCachedBag$CachedBagIterator.hasNext(InternalCachedBag.java:208) > at > org.apache.pig.builtin.AlgebraicLongMathBase.doTupleWork(AlgebraicLongMathBase.java:72) > at > org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:108) > at > org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:102) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNext(POUserFunc.java:330) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNextTuple(POUserFunc.java:369) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.getNext(PhysicalOperator.java:333) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.processPlan(POForEach.java:378) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.getNextTuple(POForEach.java:298) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.processInput(PhysicalOperator.java:281) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLocalRearrange.getNextTuple(POLocalRearrange.java:263) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.runPipeline(PODemux.java:220) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.getNextTuple(PODemux.java:210) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.processOnePackageOutput(PigCombiner.java:183) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:161) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:51) > at 
org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171) > at org.apache.hadoop.mapred.Task$NewCombinerRunner.combine(Task.java:1651) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.sortAndSpill(MapTask.java:1632) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.access$900(MapTask.java:873) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer$SpillThread.run(MapTask.java:1525) > > 2014-10-27 08:52:34,897 INFO [main] org.apache.hadoop.mapred.MapTask: > Ignoring exception during close for > org.apache.hadoop.mapred.MapTask$NewOutputCollector@1475a7cf > java.io.IOException: Spill failed > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.checkSpillException(MapTask.java:1555) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.flush(MapTask.java:1464) > at > org.apache.hadoop.mapred.MapTask$NewOutputCollector.close(MapTask.java:720) > at org.apache.hadoop.mapred.MapTask.closeQuietly(MapTask.java:2014) > at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:794) > at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) > at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1614) > at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:163) > Caused by: java.lang.OutOfMemoryError: GC overhead limit exceeded > at org.apache.pig.data.DefaultTuple.<init>(DefaultTuple.java:69) > at org.apache.pig.data.BinSedesTuple.<init>(BinSedesTuple.java:66) > at > org.apache.pig.data.BinSedesTupleFactory.newTuple(BinSedesTupleFactory.java:38) > at > org.apache.pig.data.utils.SedesHelper.readGenericTuple(SedesHelper.java:142) > at org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:349) > at org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:318) > at > 
org.apache.pig.data.InternalCachedBag$CachedBagIterator.hasNext(InternalCachedBag.java:208) > at > org.apache.pig.builtin.AlgebraicLongMathBase.doTupleWork(AlgebraicLongMathBase.java:72) > at > org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:108) > at > org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:102) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNext(POUserFunc.java:330) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNextTuple(POUserFunc.java:369) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.getNext(PhysicalOperator.java:333) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.processPlan(POForEach.java:378) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.getNextTuple(POForEach.java:298) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.processInput(PhysicalOperator.java:281) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLocalRearrange.getNextTuple(POLocalRearrange.java:263) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.runPipeline(PODemux.java:220) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.getNextTuple(PODemux.java:210) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.processOnePackageOutput(PigCombiner.java:183) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:161) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:51) > at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171) > at 
org.apache.hadoop.mapred.Task$NewCombinerRunner.combine(Task.java:1651) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.sortAndSpill(MapTask.java:1632) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.access$900(MapTask.java:873) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer$SpillThread.run(MapTask.java:1525) > 2014-10-27 08:52:34,900 WARN [main] org.apache.hadoop.mapred.YarnChild: > Exception running child : java.io.IOException: Spill failed > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.checkSpillException(MapTask.java:1555) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.flush(MapTask.java:1464) > at > org.apache.hadoop.mapred.MapTask$NewOutputCollector.close(MapTask.java:720) > at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:790) > at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) > at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1614) > at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:163) > Caused by: java.lang.OutOfMemoryError: GC overhead limit exceeded > at org.apache.pig.data.DefaultTuple.<init>(DefaultTuple.java:69) > at org.apache.pig.data.BinSedesTuple.<init>(BinSedesTuple.java:66) > at > org.apache.pig.data.BinSedesTupleFactory.newTuple(BinSedesTupleFactory.java:38) > at > org.apache.pig.data.utils.SedesHelper.readGenericTuple(SedesHelper.java:142) > at org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:349) > at org.apache.pig.data.BinInterSedes.readDatum(BinInterSedes.java:318) > at > org.apache.pig.data.InternalCachedBag$CachedBagIterator.hasNext(InternalCachedBag.java:208) > at > org.apache.pig.builtin.AlgebraicLongMathBase.doTupleWork(AlgebraicLongMathBase.java:72) > at > 
org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:108) > at > org.apache.pig.builtin.AlgebraicLongMathBase$Intermediate.exec(AlgebraicLongMathBase.java:102) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNext(POUserFunc.java:330) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.expressionOperators.POUserFunc.getNextTuple(POUserFunc.java:369) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.getNext(PhysicalOperator.java:333) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.processPlan(POForEach.java:378) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POForEach.getNextTuple(POForEach.java:298) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.processInput(PhysicalOperator.java:281) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLocalRearrange.getNextTuple(POLocalRearrange.java:263) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.runPipeline(PODemux.java:220) > at > org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux.getNextTuple(PODemux.java:210) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.processOnePackageOutput(PigCombiner.java:183) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:161) > at > org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigCombiner$Combine.reduce(PigCombiner.java:51) > at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171) > at org.apache.hadoop.mapred.Task$NewCombinerRunner.combine(Task.java:1651) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer.sortAndSpill(MapTask.java:1632) > at > 
org.apache.hadoop.mapred.MapTask$MapOutputBuffer.access$900(MapTask.java:873) > at > org.apache.hadoop.mapred.MapTask$MapOutputBuffer$SpillThread.run(MapTask.java:1525) >