There should be another exception about HBaseResourceStore earlier in the
log. Find it and it should give you more clues. I suspect something is
wrong with the connection to HBase.

On Mon, Mar 28, 2016 at 9:50 AM, [email protected] <[email protected]> wrote:

> I use hdp install kylin 1.5 version, installation is complete start kylin,
> interface access fails, view the log shows:
> INFO: Initializing Spring root WebApplicationContext
> Mar 28, 2016 9:26:42 AM org.apache.catalina.core.StandardContext
> listenerStart
> SEVERE: Exception sending context initialized event to listener instance
> of class org.springframework.web.context.ContextLoaderListener
> org.springframework.beans.factory.BeanCreationException: Error creating
> bean with name 'queryService': Injection of autowired dependencies failed;
> nested exception is
> org.springframework.beans.factory.BeanCreationException: Could not autowire
> field: private org.apache.kylin.rest.service.CacheService
> org.apache.kylin.rest.service.QueryService.cacheService; nested exception
> is org.springframework.beans.factory.BeanCreationException: Error creating
> bean with name 'cacheService': Invocation of init method failed; nested
> exception is java.lang.IllegalArgumentException: Failed to find metadata
> store by url: kylin_metadata@hbase
>
> I then modified it to kylin.metadata.url=
> kylin_metadata_qa@hbase:localhost:2181:/hbase-unsecure
> Error:
> ryService.cacheService; nested exception is
> org.springframework.beans.factory.BeanCreationException: Error creating
> bean with name 'cacheService': Invocation of init method failed; nested
> exception is java.lang.IllegalArgumentException: Failed to find metadata
> store by url: kylin_metadata_qa@hbase:localhost:2181:/hbase-unsecure
>         at
> org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessPropertyValues(AutowiredAnnotationBeanPostProcessor.java:287)
>         at
> org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1106)
>         at
> org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:517)
>         at
> org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:456)
>         at
> org.springframework.beans.factory.support.AbstractBeanFactory$1.getObject(AbstractBeanFactory.java:294)
>         at
> org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:225)
>         at
> org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:291)
>         at
> org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:193)
>         at
> org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:609)
>         at
> org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:918)
>         at
> org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:469)
>         at
> org.springframework.web.context.ContextLoader.configureAndRefreshWebApplicationContext(ContextLoader.java:383)
>         at
> org.springframework.web.context.ContextLoader.initWebApplicationContext(ContextLoader.java:283)
>         at
> org.springframework.web.context.ContextLoaderListener.contextInitialized(ContextLoaderListener.java:111)
>         at
> org.apache.catalina.core.StandardContext.listenerStart(StandardContext.java:5016)
>         at
> org.apache.catalina.core.StandardContext.startInternal(StandardContext.java:5524)
>         at
> org.apache.catalina.util.LifecycleBase.start(LifecycleBase.java:150)
>         at
> org.apache.catalina.core.ContainerBase.addChildInternal(ContainerBase.java:901)
>         at
> org.apache.catalina.core.ContainerBase.addChild(ContainerBase.java:877)
>         at
> org.apache.catalina.core.StandardHost.addChild(StandardHost.java:649)
>         at
> org.apache.catalina.startup.HostConfig.deployWAR(HostConfig.java:1081)
>         at
> org.apache.catalina.startup.HostConfig$DeployWar.run(HostConfig.java:1877)
>         at
> java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
>         at java.util.concurrent.FutureTask.run(FutureTask.java:262)
>         at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>         at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>         at java.lang.Thread.run(Thread.java:745)
>
> My configuration file is as follows:
> [email protected]
>
> # List of web servers in use, this enables one web server instance to sync
> up with other servers.
> kylin.rest.servers=192.168.1.12:7070
>
> # The metadata store in hbase
> #kylin.metadata.url=kylin_metadata@hbase
> kylin.metadata.url=kylin_metadata_qa@hbase:localhost:2181:/hbase-unsecure
> # The storage for final cube file in hbase
> kylin.storage.url=hbase
> kylin.job.yarn.app.rest.check.status.url=
> http://spark04:8088/ws/v1/cluster/apps/${job_id}?
> kylin.job.yarn.app.rest.check.interval.seconds=20
> kylin.query.security.enabled=false
> # Temp folder in hdfs, make sure user has the right access to the hdfs
> directory
> kylin.hdfs.working.dir=/kylin_new
>
> # HBase Cluster FileSystem, which serving hbase, format as
> hdfs://hbase-cluster:8020
> # leave empty if hbase running on same cluster with hive and mapreduce
> kylin.hbase.cluster.fs=hdfs://mycluster/apps/hbase/data_new
> kylin.route.hive.enabled=true
> kylin.route.hive.url=jdbc:hive2://192.168.1.12:10000
>
> kylin.job.mapreduce.default.reduce.input.mb=500
>
> # max job retry on error, default 0: no retry
> kylin.job.retry=0
>
> # If true, job engine will not assume that hadoop CLI reside on the same
> server as it self
> # you will have to specify kylin.job.remote.cli.hostname,
> kylin.job.remote.cli.username and kylin.job.remote.cli.password
> # It should not be set to "true" unless you're NOT running Kylin.sh on a
> hadoop client machine
> # (Thus kylin instance has to ssh to another real hadoop client machine to
> execute hbase,hive,hadoop commands)
> kylin.job.run.as.remote.cmd=false
>
> # Only necessary when kylin.job.run.as.remote.cmd=true
> kylin.job.remote.cli.hostname=
>
> # Only necessary when kylin.job.run.as.remote.cmd=true
> kylin.job.remote.cli.username=
>
> # Only necessary when kylin.job.run.as.remote.cmd=true
> kylin.job.remote.cli.password=
>
> # Used by test cases to prepare synthetic data for sample cube
> kylin.job.remote.cli.working.dir=/tmp/kylin
>
> # Max count of concurrent jobs running
> kylin.job.concurrent.max.limit=10
>
> # Time interval to check hadoop job status
> kylin.job.yarn.app.rest.check.interval.seconds=10
>
> # Hive database name for putting the intermediate flat tables
> kylin.job.hive.database.for.intermediatetable=default
>
> # Whether calculate cube in mem in each mapper;
> kylin.job.cubing.inMem=true
>
> #default compression codec for htable,snappy,lzo,gzip,lz4
> kylin.hbase.default.compression.codec=snappy
>
> #the percentage of the sampling, default 100%
> kylin.job.cubing.inMem.sampling.percent=100
>
> # The cut size for hbase region, in GB.
> # E.g, for cube whose capacity be marked as "SMALL", split region per 10GB
> by default
> kylin.hbase.region.cut.small=5
> kylin.hbase.region.cut.medium=10
> kylin.hbase.region.cut.large=50
>
> # The hfile size of GB, smaller hfile leading to the converting hfile MR
> has more reducers and be faster
> # set 0 to disable this optimization
> kylin.hbase.hfile.size.gb=5
>
> # Enable/disable ACL check for cube query
> kylin.query.security.enabled=true
>
> # whether get job status from resource manager with kerberos authentication
> kylin.job.status.with.kerberos=false
>
> ## kylin security configurations
>
> # spring security profile, options: testing, ldap, saml
> # with "testing" profile, user can use pre-defined name/pwd like
> KYLIN/ADMIN to login
> kylin.security.profile=testing
>
> # default roles and admin roles in LDAP, for ldap and saml
> acl.defaultRole=ROLE_ANALYST,ROLE_MODELER
> acl.adminRole=ROLE_ADMIN
>
> #LDAP authentication configuration
> ldap.server=ldap://ldap_server:389
> ldap.username=
> ldap.password=
>
> #LDAP user account directory;
> ldap.user.searchBase=
> ldap.user.searchPattern=
> ldap.user.groupSearchBase=
>
> #LDAP service account directory
> ldap.service.searchBase=
> ldap.service.searchPattern=
> ldap.service.groupSearchBase=
>
> #SAML configurations for SSO
> # SAML IDP metadata file location
> saml.metadata.file=classpath:sso_metadata.xml
> saml.metadata.entityBaseURL=https://hostname/kylin
> saml.context.scheme=https
> saml.context.serverName=hostname
> saml.context.serverPort=443
> saml.context.contextPath=/kylin
>
>
> ganglia.group=
> ganglia.port=8664
>
> ## Config for mail service
>
> # If true, will send email notification;
> mail.enabled=false
> mail.host=
> mail.username=
> mail.password=
> mail.sender=
>
> ###########################config info for web#######################
>
> #help info ,format{name|displayName|link} ,optional
> kylin.web.help.length=4
> kylin.web.help.0=start|Getting Started|
> kylin.web.help.1=odbc|ODBC Driver|
> kylin.web.help.2=tableau|Tableau Guide|
> kylin.web.help.3=onboard|Cube Design Tutorial|
>
> #guide user how to build streaming cube
> kylin.web.streaming.guide=http://kylin.apache.org/
>
> #hadoop url link ,optional
> kylin.web.hadoop=
> #job diagnostic url link ,optional
> kylin.web.diagnostic=
> #contact mail on web page ,optional
> kylin.web.contact_mail=
>
> ###########################config info for front#######################
>
> #env DEV|QA|PROD
> deploy.env=QA
>
> ###########################deprecated configs#######################
> kylin.sandbox=true
>
>  kylin.web.hive.limit=20
>
>
> Please help me, thanks!
>
>
> [email protected]
>

Reply via email to