Hi Niranda,
I've attached the files here. I tested the connection using the DAS datasource
"Test Connection" option, and it says "Connection is healthy". Do I need to
pass any additional parameters in the analytics-datasources.xml file for the MySQL DB?
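For example, I wasn't sure whether the JDBC URL needs any MySQL-specific
options along these lines (just a guess on my part; autoReconnect is only an
example parameter, not something I've confirmed is required):

<url>jdbc:mysql://127.0.0.1:3306/ANALYTICS_PROCESSED_DATA_STORE?autoReconnect=true</url>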
Thanks.
On Tue, Sep 8, 2015 at 11:25 PM, Niranda Perera <[email protected]> wrote:
> Hi Thanuja,
>
> Can you attach the config files in repository/conf/analytics/* and
> repository/conf/datasources/*?
>
> I suspect this is a connection issue.
>
> rgds
>
> On Tue, Sep 8, 2015 at 5:28 PM, Thanuja Uruththirakodeeswaran <
> [email protected]> wrote:
>
>> Hi Niranda,
>>
>> I'm trying the following query in Spark:
>>
>> create temporary table cluster_member using CarbonJDBC options
>> (dataSource "ANALYTICS_PROCESSED_DATA_STORE", tableName "CLUSTER_MEMBER");
>>
>> But I'm getting the following error. How can I fix it?
>>
>> org.apache.axis2.AxisFault: Exception occurred while trying to invoke service method execute
>>   at org.apache.axis2.util.Utils.getInboundFaultFromMessageContext(Utils.java:531)
>>   at org.apache.axis2.description.OutInAxisOperationClient.handleResponse(OutInAxisOperation.java:370)
>>   at org.apache.axis2.description.OutInAxisOperationClient.send(OutInAxisOperation.java:445)
>>   at org.apache.axis2.description.OutInAxisOperationClient.executeImpl(OutInAxisOperation.java:225)
>>   at org.apache.axis2.client.OperationClient.execute(OperationClient.java:149)
>>   at org.wso2.carbon.analytics.spark.admin.stub.AnalyticsProcessorAdminServiceStub.execute(AnalyticsProcessorAdminServiceStub.java:912)
>>   at org.wso2.carbon.analytics.spark.ui.client.AnalyticsExecutionClient.executeScriptContent(AnalyticsExecutionClient.java:67)
>>   at org.apache.jsp.spark_002dmanagement.executeScript_005fajaxprocessor_jsp._jspService(executeScript_005fajaxprocessor_jsp.java:110)
>>   at org.apache.jasper.runtime.HttpJspBase.service(HttpJspBase.java:70)
>>   at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)
>>   at org.apache.jasper.servlet.JspServletWrapper.service(JspServletWrapper.java:432)
>>   at org.apache.jasper.servlet.JspServlet.serviceJspFile(JspServlet.java:395)
>>   at org.apache.jasper.servlet.JspServlet.service(JspServlet.java:339)
>>   at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)
>>   at org.wso2.carbon.ui.JspServlet.service(JspServlet.java:155)
>>   at org.wso2.carbon.ui.TilesJspServlet.service(TilesJspServlet.java:80)
>>   at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)
>>   at org.eclipse.equinox.http.helper.ContextPathServletAdaptor.service(ContextPathServletAdaptor.java:37)
>>   at org.eclipse.equinox.http.servlet.internal.ServletRegistration.service(ServletRegistration.java:61)
>>   at org.eclipse.equinox.http.servlet.internal.ProxyServlet.processAlias(ProxyServlet.java:128)
>>   at org.eclipse.equinox.http.servlet.internal.ProxyServlet.service(ProxyServlet.java:68)
>>   at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)
>>   at org.wso2.carbon.tomcat.ext.servlet.DelegationServlet.service(DelegationServlet.java:68)
>>   at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:303)
>>   at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:208)
>>   at org.apache.tomcat.websocket.server.WsFilter.doFilter(WsFilter.java:52)
>>   at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:241)
>>   at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:208)
>>   at org.wso2.carbon.tomcat.ext.filter.CharacterSetFilter.doFilter(CharacterSetFilter.java:61)
>>   at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:241)
>>   at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:208)
>>   at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:220)
>>   at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:122)
>>   at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:504)
>>   at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:170)
>>   at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:103)
>>   at org.wso2.carbon.tomcat.ext.valves.CompositeValve.continueInvocation(CompositeValve.java:99)
>>   at org.wso2.carbon.tomcat.ext.valves.CarbonTomcatValve$1.invoke(CarbonTomcatValve.java:47)
>>   at org.wso2.carbon.webapp.mgt.TenantLazyLoaderValve.invoke(TenantLazyLoaderValve.java:57)
>>   at org.wso2.carbon.event.receiver.core.internal.tenantmgt.TenantLazyLoaderValve.invoke(TenantLazyLoaderValve.java:48)
>>   at org.wso2.carbon.tomcat.ext.valves.TomcatValveContainer.invokeValves(TomcatValveContainer.java:47)
>>   at org.wso2.carbon.tomcat.ext.valves.CompositeValve.invoke(CompositeValve.java:62)
>>   at org.wso2.carbon.tomcat.ext.valves.CarbonStuckThreadDetectionValve.invoke(CarbonStuckThreadDetectionValve.java:159)
>>   at org.apache.catalina.valves.AccessLogValve.invoke(AccessLogValve.java:950)
>>   at org.wso2.carbon.tomcat.ext.valves.CarbonContextCreatorValve.invoke(CarbonContextCreatorValve.java:57)
>>   at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:116)
>>   at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:421)
>>   at org.apache.coyote.http11.AbstractHttp11Processor.process(AbstractHttp11Processor.java:1074)
>>   at org.apache.coyote.AbstractProtocol$AbstractConnectionHandler.process(AbstractProtocol.java:611)
>>   at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1739)
>>   at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.run(NioEndpoint.java:1698)
>>   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>>   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>>   at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61)
>>   at java.lang.Thread.run(Thread.java:745)
>>
>> Thanks.
>>
>> --
>> Thanuja Uruththirakodeeswaran
>> Software Engineer
>> WSO2 Inc.; http://wso2.com
>> lean.enterprise.middleware
>>
>> mobile: +94 774363167
>>
>
>
>
> --
> *Niranda Perera*
> Software Engineer, WSO2 Inc.
> Mobile: +94-71-554-8430
> Twitter: @n1r44 <https://twitter.com/N1R44>
> https://pythagoreanscript.wordpress.com/
>
--
Thanuja Uruththirakodeeswaran
Software Engineer
WSO2 Inc.; http://wso2.com
lean.enterprise.middleware
mobile: +94 774363167
--- analytics-config.xml ---

<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
~
~ WSO2 Inc. licenses this file to you under the Apache License,
~ Version 2.0 (the "License"); you may not use this file except
~ in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<analytics-dataservice-configuration>
<!-- The name of the primary record store -->
<primaryRecordStore>EVENT_STORE</primaryRecordStore>
<!-- The name of the index staging record store -->
<indexStagingRecordStore>INDEX_STAGING_STORE</indexStagingRecordStore>
<!-- Analytics File System - properties related to index storage implementation -->
<analytics-file-system>
<implementation>org.wso2.carbon.analytics.datasource.rdbms.RDBMSAnalyticsFileSystem</implementation>
<properties>
            <!-- the datasource name defined in the datasources configuration -->
<property name="datasource">WSO2_ANALYTICS_FS_DB</property>
</properties>
</analytics-file-system>
<!-- Analytics Record Store - properties related to record storage implementation -->
<analytics-record-store name="EVENT_STORE">
<implementation>org.wso2.carbon.analytics.datasource.hbase.HBaseAnalyticsRecordStore</implementation>
<properties>
<property name="datasource">WSO2_ANALYTICS_EVENT_STORE_DB_HBASE</property>
<property name="category">large_dataset_optimized</property>
</properties>
</analytics-record-store>
<analytics-record-store name="INDEX_STAGING_STORE">
<implementation>org.wso2.carbon.analytics.datasource.hbase.HBaseAnalyticsRecordStore</implementation>
<properties>
<property name="datasource">WSO2_ANALYTICS_EVENT_STORE_DB_HBASE</property>
<property name="category">limited_dataset_optimized</property>
</properties>
</analytics-record-store>
   <analytics-record-store name="PROCESSED_DATA_STORE">
<implementation>org.wso2.carbon.analytics.datasource.rdbms.RDBMSAnalyticsRecordStore</implementation>
<properties>
<property name="datasource">WSO2_ANALYTICS_PROCESSED_DATA_STORE_DB</property>
<property name="category">large_dataset_optimized</property>
</properties>
</analytics-record-store>
<!-- The data indexing analyzer implementation -->
<analytics-lucene-analyzer>
<implementation>org.apache.lucene.analysis.standard.StandardAnalyzer</implementation>
</analytics-lucene-analyzer>
   <!-- The maximum number of threads used for indexing per node; -1 signals to auto-detect the optimum value,
        which is equal to (number of CPU cores in the system - 1) -->
<indexingThreadCount>-1</indexingThreadCount>
   <!-- The number of index shards; this should be equal to or higher than the number of indexing nodes in the cluster,
        the ideal count being [number of indexing nodes * CPU cores used for indexing per node] -->
<shardCount>6</shardCount>
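   <!-- For instance, assuming 2 indexing nodes each using 3 cores for indexing, the ideal shardCount
        would be 2 * 3 = 6, matching the value above -->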
<!-- Data purging related configuration -->
<analytics-data-purging>
      <!-- The entry below indicates whether purging is enabled. To enable data purging for a cluster,
           this property must be enabled on all nodes -->
<purging-enable>false</purging-enable>
<cron-expression>0 0 0 * * ?</cron-expression>
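      <!-- The Quartz cron expression above (seconds minutes hours day-of-month month day-of-week)
           runs the purge task daily at midnight -->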
      <!-- Tables to include in purging. Use a regular expression to specify the table names to include. -->
<purge-include-table-patterns>
<table>.*</table>
<!--<table>.*jmx.*</table>-->
</purge-include-table-patterns>
      <!-- All records inserted before the specified retention period will be eligible for purging -->
<data-retention-days>365</data-retention-days>
</analytics-data-purging>
<!-- Receiver/Indexing flow-control configuration -->
   <analytics-receiver-indexing-flow-control enabled="true">
      <!-- The maximum number of records that can be in the index staging area before receiving is throttled -->
      <recordReceivingHighThreshold>10000</recordReceivingHighThreshold>
      <!-- Receiving is throttled until the staged record count drops back below this limit -->
      <recordReceivingLowThreshold>5000</recordReceivingLowThreshold>
</analytics-receiver-indexing-flow-control>
</analytics-dataservice-configuration>
--- master-datasources.xml ---

<datasources-configuration xmlns:svns="http://org.wso2.securevault/configuration">
<providers>
<provider>org.wso2.carbon.ndatasource.rdbms.RDBMSDataSourceReader</provider>
</providers>
<datasources>
<datasource>
<name>WSO2_CARBON_DB</name>
<description>The datasource used for registry and user manager</description>
<jndiConfig>
<name>jdbc/WSO2CarbonDB</name>
</jndiConfig>
<definition type="RDBMS">
<configuration>
<url>jdbc:h2:repository/database/WSO2CARBON_DB;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=60000</url>
<username>wso2carbon</username>
<password>wso2carbon</password>
<driverClassName>org.h2.Driver</driverClassName>
<maxActive>50</maxActive>
<maxWait>60000</maxWait>
<testOnBorrow>true</testOnBorrow>
<validationQuery>SELECT 1</validationQuery>
<validationInterval>30000</validationInterval>
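                    <!-- With testOnBorrow enabled, connections are checked with the validation query above
                         when borrowed from the pool, at most once per validationInterval (30 s here) -->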
<defaultAutoCommit>false</defaultAutoCommit>
</configuration>
</definition>
</datasource>
<!-- For an explanation of the properties, see: http://people.apache.org/~fhanik/jdbc-pool/jdbc-pool.html -->
<!--datasource>
<name>SAMPLE_DATA_SOURCE</name>
<jndiConfig>
<name></name>
<environment>
<property name="java.naming.factory.initial"></property>
<property name="java.naming.provider.url"></property>
</environment>
</jndiConfig>
<definition type="RDBMS">
<configuration>
<defaultAutoCommit></defaultAutoCommit>
<defaultReadOnly></defaultReadOnly>
<defaultTransactionIsolation>NONE|READ_COMMITTED|READ_UNCOMMITTED|REPEATABLE_READ|SERIALIZABLE</defaultTransactionIsolation>
<defaultCatalog></defaultCatalog>
<username></username>
<password svns:secretAlias="WSO2.DB.Password"></password>
<maxActive></maxActive>
<maxIdle></maxIdle>
<initialSize></initialSize>
<maxWait></maxWait>
<dataSourceClassName>com.mysql.jdbc.jdbc2.optional.MysqlXADataSource</dataSourceClassName>
<dataSourceProps>
<property name="url">jdbc:mysql://localhost:3306/Test1</property>
<property name="user">root</property>
<property name="password">123</property>
</dataSourceProps>
</configuration>
</definition>
</datasource-->
</datasources>
</datasources-configuration>
--- analytics-datasources.xml ---

<datasources-configuration>
<providers>
<provider>org.wso2.carbon.ndatasource.rdbms.RDBMSDataSourceReader</provider>
<provider>org.wso2.carbon.datasource.reader.hadoop.HDFSDataSourceReader</provider>
<provider>org.wso2.carbon.datasource.reader.hadoop.HBaseDataSourceReader</provider>
<provider>org.wso2.carbon.datasource.reader.cassandra.CassandraDataSourceReader</provider>
</providers>
<datasources>
<datasource>
<name>WSO2_ANALYTICS_FS_DB</name>
<description>The datasource used for analytics file system</description>
<definition type="RDBMS">
<configuration>
<url>jdbc:mysql://127.0.0.1:3306/ANALYTICS_FS_DB</url>
<username>root</username>
<password>root</password>
<driverClassName>com.mysql.jdbc.Driver</driverClassName>
<maxActive>50</maxActive>
<maxWait>60000</maxWait>
<testOnBorrow>true</testOnBorrow>
<validationQuery>SELECT 1</validationQuery>
<validationInterval>30000</validationInterval>
<defaultAutoCommit>false</defaultAutoCommit>
</configuration>
</definition>
</datasource>
<!-- <datasource>
<name>WSO2_ANALYTICS_EVENT_STORE_DB</name>
<description>The datasource used for analytics record store</description>
<definition type="RDBMS">
<configuration>
<url>jdbc:h2:repository/database/ANALYTICS_EVENT_STORE;AUTO_SERVER=TRUE;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=60000</url>
<username>wso2carbon</username>
<password>wso2carbon</password>
<driverClassName>org.h2.Driver</driverClassName>
<maxActive>50</maxActive>
<maxWait>60000</maxWait>
<testOnBorrow>true</testOnBorrow>
<validationQuery>SELECT 1</validationQuery>
<validationInterval>30000</validationInterval>
<defaultAutoCommit>false</defaultAutoCommit>
</configuration>
</definition>
</datasource> -->
        <!-- HBase datasource used by the analytics record stores -->
<datasource>
<name>WSO2_ANALYTICS_EVENT_STORE_DB_HBASE</name>
            <description>The datasource used for the analytics record store (HBase)</description>
<jndiConfig>
<name>jdbc/WSO2HBaseDB</name>
</jndiConfig>
<definition type="HBASE">
<configuration>
<property>
<name>hbase.master</name>
<value>localhost:16000</value>
</property>
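                    <!-- 16000 is the default HBase master RPC port in HBase 1.x; older 0.9x releases
                         used 60000, as in the commented-out sample further below -->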
</configuration>
</definition>
</datasource>
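        <!-- This MySQL datasource backs the PROCESSED_DATA_STORE record store defined in
             analytics-config.xml above -->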
<datasource>
<name>WSO2_ANALYTICS_PROCESSED_DATA_STORE_DB</name>
<description>The datasource used for analytics record store</description>
<definition type="RDBMS">
<configuration>
<url>jdbc:mysql://127.0.0.1:3306/ANALYTICS_PROCESSED_DATA_STORE</url>
<username>root</username>
<password>root</password>
<driverClassName>com.mysql.jdbc.Driver</driverClassName>
<maxWait>60000</maxWait>
<testOnBorrow>true</testOnBorrow>
<validationQuery>SELECT 1</validationQuery>
<validationInterval>30000</validationInterval>
<defaultAutoCommit>false</defaultAutoCommit>
</configuration>
</definition>
</datasource>
<!-- Sample datasource implementation for HDFS Analytics FileSystem-->
<!--<datasource>
<name>WSO2_ANALYTICS_FS_DB_HDFS</name>
<description>The datasource used for analytics file system</description>
<jndiConfig>
<name>jdbc/WSO2HDFSDB</name>
</jndiConfig>
<definition type="HDFS">
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:9000</value>
</property>
<property>
<name>dfs.data.dir</name>
<value>/dfs/data</value>
</property>
<property>
<name>fs.hdfs.impl</name>
<value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
</property>
<property>
<name>fs.file.impl</name>
<value>org.apache.hadoop.fs.LocalFileSystem</value>
</property>
</configuration>
</definition>
</datasource>-->
<!-- Sample datasource implementation for HBase Analytics RecordStore-->
<!--<datasource>
<name>WSO2_ANALYTICS_RS_DB_HBASE</name>
            <description>The datasource used for the analytics record store</description>
<jndiConfig>
<name>jdbc/WSO2HBaseDB</name>
</jndiConfig>
<definition type="HBASE">
<configuration>
<property>
<name>hbase.master</name>
<value>localhost:60000</value>
</property>
<property>
<name>fs.hdfs.impl</name>
<value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
</property>
<property>
<name>fs.file.impl</name>
<value>org.apache.hadoop.fs.LocalFileSystem</value>
</property>
</configuration>
</definition>
</datasource>-->
<!-- Sample datasource implementation for Cassandra -->
<!--<datasource>
<name>WSO2_ANALYTICS_DS_CASSANDRA</name>
<description>The Cassandra datasource used for analytics</description>
<definition type="CASSANDRA">
<configuration>
<contactPoints>localhost</contactPoints>
<port>9042</port>
<username>admin</username>
<password>admin</password>
<clusterName>cluster1</clusterName>
<compression>none</compression>
<poolingOptions>
<coreConnectionsPerHost hostDistance="LOCAL">8</coreConnectionsPerHost>
<maxSimultaneousRequestsPerHostThreshold hostDistance="LOCAL">1024</maxSimultaneousRequestsPerHostThreshold>
</poolingOptions>
<queryOptions>
<fetchSize>5000</fetchSize>
<consistencyLevel>ONE</consistencyLevel>
<serialConsistencyLevel>SERIAL</serialConsistencyLevel>
</queryOptions>
<socketOptions>
<keepAlive>false</keepAlive>
<sendBufferSize>150000</sendBufferSize>
<connectTimeoutMillis>12000</connectTimeoutMillis>
<readTimeoutMillis>12000</readTimeoutMillis>
</socketOptions>
</configuration>
</definition>
</datasource>-->
</datasources>
</datasources-configuration>
_______________________________________________
Dev mailing list
[email protected]
http://wso2.org/cgi-bin/mailman/listinfo/dev