shuaidonga opened a new issue #8289: druid 0.15.0 with cdh 5.9.2 index-hadoop 
happened error
URL: https://github.com/apache/incubator-druid/issues/8289
 
 
   druid cluster configuration:
    1、common.runtime.properties
   
druid.extensions.loadList=["mysql-metadata-storage","druid-kafka-indexing-service",
 "druid-datasketches","druid-hdfs-storage"]
   
druid.extensions.hadoopDependenciesDir=/opt/apache-druid-0.15.0-incubating/hadoop-dependencies
   # Log all runtime properties on startup. Disable to avoid logging properties 
on startup:
   druid.startup.logging.logProperties=true
   #
   # Zookeeper
   
druid.zk.service.host=10.205.131.162:2181,10.205.132.128:2181,10.205.208.165:2181
   druid.zk.paths.base=/druid
   #
   # Metadata storage
   #
   # For MySQL (make sure to include the MySQL JDBC driver on the classpath):
   druid.metadata.storage.type=mysql
   
druid.metadata.storage.connector.connectURI=jdbc:mysql://10.205.113.192:3306/druid
   druid.metadata.storage.connector.user=root
   druid.metadata.storage.connector.password=*******
   #
   # Deep storage
   # For HDFS:
   druid.storage.type=hdfs
   druid.storage.storageDirectory=/druid/segments
   # For HDFS:
   druid.indexer.logs.type=hdfs
   druid.indexer.logs.directory=/druid/indexing-logs
   druid.selectors.indexing.serviceName=druid/overlord
   druid.selectors.coordinator.serviceName=druid/coordinator
   druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
   druid.emitter=noop
   druid.emitter.logging.logLevel=info
   druid.indexing.doubleStorage=double
   
druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password"]
   druid.sql.enable=true
   druid.lookup.enableLookupSyncOnStartup=false
   2、middleManager  runtime.properties
   druid.service=druid/middleManager
   druid.plaintextPort=8091
   druid.host=10.205.100.13
   # Number of tasks per middleManager
   druid.worker.capacity=8
   # Task launch parameters
   druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g 
-XX:MaxDirectMemorySize=1g -Duser.timezone=UTC+8 -Dfile.encoding=UTF-8  
-XX:+ExitOnOutOfMemoryError 
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
   druid.indexer.task.baseTaskDir=/vdb/druid/task
   # HTTP server threads
   druid.server.http.numThreads=60
   # Processing threads and buffers on Peons
   druid.indexer.fork.property.druid.processing.numMergeBuffers=2
   druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
   druid.indexer.fork.property.druid.processing.numThreads=1
   # Hadoop indexing
   druid.indexer.task.hadoopWorkingPath=/vdb/druid/hadoop-tmp
   
druid.indexer.task.defaultHadoopCoordinates=["org.apache.hadoop:hadoop-client:2.6.0-cdh5.9.2"]
   3、wikipedia-index-hadoop.json
   {
     "type" : "index_hadoop",
     "spec" : {
       "dataSchema" : {
         "dataSource" : "wikipedia",
         "parser" : {
           "type" : "hadoopyString",
           "parseSpec" : {
             "format" : "json",
             "dimensionsSpec" : {
               "dimensions" : [
                 "channel",
                 "cityName",
                 "comment",
                 "countryIsoCode",
                 "countryName",
                 "isAnonymous",
                 "isMinor",
                 "isNew",
                 "isRobot",
                 "isUnpatrolled",
                 "metroCode",
                 "namespace",
                 "page",
                 "regionIsoCode",
                 "regionName",
                 "user",
                 { "name": "added", "type": "long" },
                 { "name": "deleted", "type": "long" },
                 { "name": "delta", "type": "long" }
               ]
             },
             "timestampSpec" : {
               "format" : "auto",
               "column" : "time"
             }
           }
         },
         "metricsSpec" : [],
         "granularitySpec" : {
           "type" : "uniform",
           "segmentGranularity" : "day",
           "queryGranularity" : "none",
           "intervals" : ["2015-09-12/2015-09-13"],
           "rollup" : false
         }
       },
       "ioConfig" : {
         "type" : "hadoop",
         "inputSpec" : {
           "type" : "static",
           "paths" : "/user/druid/wikiticker-2015-09-12-sampled.json.gz"
         }
       },
       "tuningConfig" : {
         "type" : "hadoop",
         "partitionsSpec" : {
           "type" : "hashed",
           "targetPartitionSize" : 5000000
         },
         "forceExtendableShardSpecs" : true,
         "jobProperties" : {
           "mapreduce.job.user.classpath.first": "true",
           "mapreduce.map.java.opts" : "-Duser.timezone=UTC+8 
-Dfile.encoding=UTF-8",
           "mapreduce.reduce.java.opts" : "-Duser.timezone=UTC+8 
-Dfile.encoding=UTF-8",
           "mapreduce.map.memory.mb" : 1024,
           "mapreduce.reduce.memory.mb" : 1024
         }
       }
     },
     "hadoopDependencyCoordinates": 
["org.apache.hadoop:hadoop-client:2.6.0-cdh5.9.2"]
   }
   
   4、submitting the task produced the following error
   {
     "ingestionStatsAndErrors": {
       "taskId": "index_hadoop_wikipedia_2019-08-13T07:39:49.754Z",
       "payload": {
         "ingestionState": "DETERMINE_PARTITIONS",
         "unparseableEvents": null,
         "rowStats": {},
         "errorMsg": "java.lang.NoSuchFieldError: DEFAULT\n\tat 
org.apache.hadoop.mapreduce.TypeConverter.fromYarn(TypeConverter.java:322)\n\tat
 
org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:440)\n\tat
 org.apache.hadoop.mapred.YARNRunner.submitJob(YARNRunner.java:314)\n\tat 
org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:244)\n\tat
 org.apache.hadoop.mapreduce.Job$10.run(Job.java:1307)\n\tat 
org.apache.hadoop.mapreduce.Job$10.run(Job.java:1304)\n\tat 
java.security.AccessController.doPrivileged(Native Method)\n\tat 
javax.security.auth.Subject.doAs(Subject.java:422)\n\tat 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1836)\n\tat
 org.apache.hadoop.mapreduce.Job.submit(Job.java:1304)\n\tat 
org.apache.druid.indexer.DetermineHashedPartitionsJob.run(DetermineHashedPartitionsJob.java:124)\n\tat
 org.apache.druid.indexer.JobHelper.runSingleJob(JobHelper.java:384)\n\tat 
org.apache.druid.indexer.HadoopDruidDetermineConfigurationJob.run(HadoopDruidDetermineConfigurationJob.java:58)\n\tat
 
org.apache.druid.indexing.common.task.HadoopIndexTask$HadoopDetermineConfigInnerProcessingRunner.runTask(HadoopIndexTask.java:617)\n\tat
 sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n\tat
 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat
 java.lang.reflect.Method.invoke(Method.java:498)\n\tat 
org.apache.druid.indexing.common.task.HadoopIndexTask.runInternal(HadoopIndexTask.java:309)\n\tat
 
org.apache.druid.indexing.common.task.HadoopIndexTask.run(HadoopIndexTask.java:244)\n\tat
 
org.apache.druid.indexing.overlord.SingleTaskBackgroundRunner$SingleTaskBackgroundRunnerCallable.call(SingleTaskBackgroundRunner.java:419)\n\tat
 
org.apache.druid.indexing.overlord.SingleTaskBackgroundRunner$SingleTaskBackgroundRunnerCallable.call(SingleTaskBackgroundRunner.java:391)\n\tat
 java.util.concurrent.FutureTask.run(FutureTask.java:266)\n\tat 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat
 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat
 java.lang.Thread.run(Thread.java:748)\n"
       },
       "type": "ingestionStatsAndErrors"
     }
   }
   
   How should I fix this?
   Thanks!
   
   
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to