Thanks for your reply. I am sending all of my configuration files; please check the attachments. I am using igfs://igfs@192.168.1.5:10500.
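
For reference, the authority in that URI is the IGFS instance name and 10500 is the default IGFS IPC TCP port. A minimal Java sketch of an equivalent explicit endpoint configuration (not taken from the attached files; the host value is an assumption based on the URI):

import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
import org.apache.ignite.igfs.IgfsIpcEndpointType;

public class IgfsEndpointSketch {
    public static FileSystemConfiguration igfsConfiguration() {
        // Explicit IPC endpoint matching igfs://igfs@192.168.1.5:10500.
        // The attached default-config.xml only sets type=TCP, so host and port
        // fall back to Ignite's defaults (10500 for the port).
        IgfsIpcEndpointConfiguration ipcCfg = new IgfsIpcEndpointConfiguration();
        ipcCfg.setType(IgfsIpcEndpointType.TCP);
        ipcCfg.setHost("192.168.1.5"); // assumption: the node address from the URI
        ipcCfg.setPort(10500);         // default IGFS IPC port

        FileSystemConfiguration fsCfg = new FileSystemConfiguration();
        fsCfg.setName("igfs"); // matches the authority part of the igfs:// URI
        fsCfg.setIpcEndpointConfiguration(ipcCfg);

        return fsCfg;
    }
}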

On Fri, Apr 1, 2016 at 12:06 PM, Vladimir Ozerov <voze...@gridgain.com>
wrote:

> Hi,
>
> Please provide your XML configuration and IGFS URI you use.
>
> Vladimir.
>
> On Thu, Mar 31, 2016 at 1:07 PM, pawanpawar <pawarem...@gmail.com> wrote:
>
> > Hi all, when I try to run the hive command I get this error:
> >
> > pawanpawar@cloud:~$ hive
> > Exception in thread "main" java.lang.RuntimeException: java.io.IOException: Failed to get file status [path=/tmp/hive]
> >         at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:444)
> >         at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:672)
> >         at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:616)
> >         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> >         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> >         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> >         at java.lang.reflect.Method.invoke(Method.java:497)
> >         at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
> >         at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
> > Caused by: java.io.IOException: Failed to get file status [path=/tmp/hive]
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.cast(HadoopIgfsUtils.java:132)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper.withReconnectHandling(HadoopIgfsWrapper.java:327)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper.info(HadoopIgfsWrapper.java:120)
> >         at org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.getFileStatus(IgniteHadoopFileSystem.java:989)
> >         at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1400)
> >         at org.apache.hadoop.hive.ql.session.SessionState.createRootHDFSDir(SessionState.java:520)
> >         at org.apache.hadoop.hive.ql.session.SessionState.createSessionDirs(SessionState.java:478)
> >         at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:430)
> >         ... 8 more
> > Caused by: class org.apache.ignite.IgniteCheckedException: Failed to get file status [path=/tmp/hive]
> >         at org.apache.ignite.internal.util.IgniteUtils.cast(IgniteUtils.java:7005)
> >         at org.apache.ignite.internal.util.future.GridFutureAdapter.get0(GridFutureAdapter.java:166)
> >         at org.apache.ignite.internal.util.future.GridFutureAdapter.get(GridFutureAdapter.java:115)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc.info(HadoopIgfsOutProc.java:209)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper$2.apply(HadoopIgfsWrapper.java:123)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper$2.apply(HadoopIgfsWrapper.java:120)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper.withReconnectHandling(HadoopIgfsWrapper.java:310)
> >         ... 14 more
> > Caused by: class org.apache.ignite.igfs.IgfsException: Failed to get file status [path=/tmp/hive]
> >         at org.apache.ignite.internal.igfs.common.IgfsControlResponse.throwError(IgfsControlResponse.java:294)
> >         at org.apache.ignite.internal.igfs.common.IgfsControlResponse.throwError(IgfsControlResponse.java:303)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc$1.apply(HadoopIgfsOutProc.java:509)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc$1.apply(HadoopIgfsOutProc.java:503)
> >         at org.apache.ignite.internal.util.future.GridFutureChainListener.apply(GridFutureChainListener.java:54)
> >         at org.apache.ignite.internal.util.future.GridFutureChainListener.apply(GridFutureChainListener.java:28)
> >         at org.apache.ignite.internal.util.future.GridFutureAdapter.notifyListener(GridFutureAdapter.java:262)
> >         at org.apache.ignite.internal.util.future.GridFutureAdapter.notifyListeners(GridFutureAdapter.java:250)
> >         at org.apache.ignite.internal.util.future.GridFutureAdapter.onDone(GridFutureAdapter.java:380)
> >         at org.apache.ignite.internal.util.future.GridFutureAdapter.onDone(GridFutureAdapter.java:346)
> >         at org.apache.ignite.internal.util.future.GridFutureAdapter.onDone(GridFutureAdapter.java:323)
> >         at org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsIpcIo$ReaderThread.run(HadoopIgfsIpcIo.java:575)
> >
> > --
> > View this message in context: http://apache-ignite-developers.2346864.n4.nabble.com/hive-not-working-with-ignite-igfs-tp8190.html
> > Sent from the Apache Ignite Developers mailing list archive at Nabble.com.
> >
>



-- 
*Thanks & Regards*
*Pawan Pawar*
*Mobile: +91 9993585256*
*Email: pawarem...@gmail.com <pawarem...@gmail.com>*
*Skype: pawarskype*
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <!--
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.1.5:9000</value>
    </property>
    -->
    <property>
        <name>fs.defaultFS</name>
        <value>igfs://igfs@192.168.1.5</value>
    </property>

    <!-- Set Hadoop 1.* file system implementation class for IGFS. -->
    <property>
        <name>fs.igfs.impl</name>
        <value>org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem</value>
    </property>

    <!-- Set Hadoop 2.* file system implementation class for IGFS. -->
    <property>
        <name>fs.AbstractFileSystem.igfs.impl</name>
        <value>org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem</value>
    </property>

    <!-- Disallow data node replacement since it does not make sense for IGFS nodes. -->
    <property>
        <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
        <value>NEVER</value>
    </property>

    <property>
        <name>hadoop.tmp.dir</name>
        <value>/app/hadoop/tmp</value>
        <description>A base for other temporary directories.</description>
    </property>

    <property>
        <name>hadoop.proxyuser.pawanpawar.hosts</name>
        <value>*</value>
    </property>

    <property>
        <name>hadoop.proxyuser.pawanpawar.groups</name>
        <value>*</value>
    </property>
</configuration>
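
With the core-site.xml above, any Hadoop client (Hive included) resolves the igfs:// scheme through Ignite's IgniteHadoopFileSystem. Below is a minimal client-side sketch of the same status lookup that fails in the stack trace, assuming the Ignite Hadoop Accelerator and Hadoop client jars are on the classpath; class and path names are illustrative.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IgfsStatusCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same mapping as core-site.xml; redundant if that file is on the classpath.
        conf.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");

        // Connect to the IGFS endpoint the igfs:// URI points at.
        FileSystem fs = FileSystem.get(new URI("igfs://igfs@192.168.1.5:10500/"), conf);

        // The call that fails in the Hive trace (SessionState -> FileSystem.exists).
        Path scratchDir = new Path("/tmp/hive");
        if (fs.exists(scratchDir)) {
            FileStatus status = fs.getFileStatus(scratchDir);
            System.out.println("Status: " + status);
        }
        else {
            System.out.println(scratchDir + " does not exist yet");
        }
    }
}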
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->

<!--
    This template file contains settings needed to run Apache Hadoop jobs
    with Apache Ignite In-Memory Accelerator.

    You can replace '$HADOOP_HOME/etc/hadoop/mapred-site.xml' file with this one
    to run jobs on localhost (local node can be a part of distributed cluster though).
    To run jobs on remote host you have to change jobtracker address to the REST address
    of any running Ignite node.

    Note that Ignite jars must be in Apache Hadoop client classpath to work
    with this configuration.

    Run script '$IGNITE_HOME/bin/setup-hadoop.{sh|bat}' for Apache Hadoop client setup.
-->

<configuration>
    <!--
        Framework name must be set to 'ignite'.
    -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>ignite</value>
    </property>

    <!--
        Job tracker address must be set to the REST address of any running Ignite node.
    -->
    <property>
        <name>mapreduce.jobtracker.address</name>
        <value>192.168.1.5:11211</value>
    </property>

    <!-- Parameters for job tuning. -->
    <!--
    <property>
        <name>mapreduce.job.reduces</name>
        <value>1</value>
    </property>

    <property>
        <name>mapreduce.job.maps</name>
        <value>4</value>
    </property>
    -->
	
    <property>
        <name>mapreduce.jobtracker.staging.root.dir</name>
        <value>${hadoop.tmp.dir}/mapred/staging</value>
    </property>

</configuration>
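
The template comments in this mapred-site.xml describe how jobs are routed to Ignite; for illustration, the same two properties can also be set programmatically when submitting a job. This is a sketch under the assumption that the Ignite jars are on the Hadoop client classpath; class, job, and path names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IgniteJobSubmitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Programmatic equivalent of the two properties in mapred-site.xml:
        // run the job on Ignite's MapReduce engine through the node's REST port.
        conf.set("mapreduce.framework.name", "ignite");
        conf.set("mapreduce.jobtracker.address", "192.168.1.5:11211");

        // Identity map/reduce; the point here is only the job routing.
        Job job = Job.getInstance(conf, "ignite-mr-sketch");
        job.setJarByClass(IgniteJobSubmitSketch.class);

        // Hypothetical IGFS input/output paths (fs.igfs.impl from core-site.xml
        // must be visible to the client for the igfs:// scheme to resolve).
        FileInputFormat.addInputPath(job, new Path("igfs://igfs@192.168.1.5:10500/input"));
        FileOutputFormat.setOutputPath(job, new Path("igfs://igfs@192.168.1.5:10500/output"));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}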
<?xml version="1.0" encoding="UTF-8"?>

<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->

<!--
    Ignite Spring configuration file.

    When starting a standalone Ignite node, you need to execute the following command:
    {IGNITE_HOME}/bin/ignite.{bat|sh} path-to-this-file/default-config.xml

    When starting Ignite from Java IDE, pass path to this file into Ignition:
    Ignition.start("path-to-this-file/default-config.xml");
-->
<beans xmlns="http://www.springframework.org/schema/beans";
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"; xmlns:util="http://www.springframework.org/schema/util";
       xsi:schemaLocation="http://www.springframework.org/schema/beans
       http://www.springframework.org/schema/beans/spring-beans.xsd
       http://www.springframework.org/schema/util
       http://www.springframework.org/schema/util/spring-util.xsd";>

    <!--
        Optional description.
    -->
    <description>
        Spring file for Ignite node configuration with IGFS and Apache Hadoop map-reduce support enabled.
        Ignite node will start with this configuration by default.
    </description>

    <!-- Initialize property configurer so we can reference environment variables. -->
    <bean id="propertyConfigurer" class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
        <property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_FALLBACK"/>
        <property name="searchSystemEnvironment" value="true"/>
    </bean>

    <!-- Abstract IGFS file system configuration to be used as a template. -->
    <bean id="igfsCfgBase" class="org.apache.ignite.configuration.FileSystemConfiguration" abstract="true">
        <!-- Must correlate with cache affinity mapper. -->
        <property name="blockSize" value="#{128 * 1024}"/>
        <property name="perNodeBatchSize" value="512"/>
        <property name="perNodeParallelBatchCount" value="16"/>

        <property name="prefetchBlocks" value="32"/>
    </bean>

    <!-- Abstract cache configuration for IGFS file data to be used as a template. -->
    <bean id="dataCacheCfgBase" class="org.apache.ignite.configuration.CacheConfiguration" abstract="true">
        <property name="cacheMode" value="PARTITIONED"/>
        <property name="atomicityMode" value="TRANSACTIONAL"/>
        <property name="writeSynchronizationMode" value="FULL_SYNC"/>
        <property name="backups" value="0"/>
        <property name="affinityMapper">
            <bean class="org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper">
                <!-- How many sequential blocks will be stored on the same node. -->
                <constructor-arg value="512"/>
            </bean>
        </property>
    </bean>

    <!-- Abstract cache configuration for IGFS metadata to be used as a template. -->
    <bean id="metaCacheCfgBase" class="org.apache.ignite.configuration.CacheConfiguration" abstract="true">
        <property name="cacheMode" value="REPLICATED"/>
        <property name="atomicityMode" value="TRANSACTIONAL"/>
        <property name="writeSynchronizationMode" value="FULL_SYNC"/>
    </bean>

    <!-- Configuration of Ignite node. -->
    <bean id="grid.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
        <!-- Apache Hadoop Accelerator configuration. -->
        <property name="hadoopConfiguration">
            <bean class="org.apache.ignite.configuration.HadoopConfiguration">
                <!-- Information about finished jobs will be kept for 30 seconds. -->
                <property name="finishedJobInfoTtl" value="30000"/>
            </bean>
        </property>

        <!-- This port will be used by Apache Hadoop client to connect to Ignite node as if it was a job tracker. -->
        <property name="connectorConfiguration">
            <bean class="org.apache.ignite.configuration.ConnectorConfiguration">
                <property name="port" value="11211"/>
            </bean>
        </property>


        <!--
            Configure one IGFS file system instance named "igfs" on this node.

            Commented-out alternative constructor arguments for the secondary file system:
                <constructor-arg name="cfgPath" value="/usr/local/apache/hadoop2/etc/hadoop/core-site-hdfs.xml"/>
                <constructor-arg name="userName" value="pawanpawar"/>
        -->
        <property name="fileSystemConfiguration">
            <list>
                <bean class="org.apache.ignite.configuration.FileSystemConfiguration" parent="igfsCfgBase">
                    <property name="name" value="igfs"/>
		    <property name="ipcEndpointEnabled" value="true"/>
                    <!-- Caches with these names must be configured. -->
                    <property name="metaCacheName" value="igfs-meta"/>
                    <property name="dataCacheName" value="igfs-data"/>

                    <!-- Configure TCP endpoint for communication with the file system instance. -->
                    <property name="ipcEndpointConfiguration">
                        <bean class="org.apache.ignite.igfs.IgfsIpcEndpointConfiguration">
                            <property name="type" value="TCP" />
                        </bean>
                    </property>

		    <property name="defaultMode" value="DUAL_ASYNC" />
                    <property name="secondaryFileSystem">
                        <bean class="org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem">
                            <constructor-arg name="uri" value="hdfs://192.168.1.5:9000"/>
                            <constructor-arg name="cfgPath"><null/></constructor-arg>
			    <constructor-arg name="userName" value="pawanpawar"/>
                        </bean>
                    </property>

                </bean>
            </list>
        </property>

        <!-- Caches needed by IGFS. -->
        <property name="cacheConfiguration">
            <list>
                <!-- File system metadata cache. -->
                <bean class="org.apache.ignite.configuration.CacheConfiguration" parent="metaCacheCfgBase">
                    <property name="name" value="igfs-meta"/>
                </bean>

                <!-- File system files data cache. -->
                <bean class="org.apache.ignite.configuration.CacheConfiguration" parent="dataCacheCfgBase">
                    <property name="name" value="igfs-data"/>
                </bean>
            </list>
        </property>

        <!-- Disable events. -->
        <property name="includeEventTypes">
            <list>
                <util:constant static-field="org.apache.ignite.events.EventType.EVT_TASK_FAILED"/>
                <util:constant static-field="org.apache.ignite.events.EventType.EVT_TASK_FINISHED"/>
                <util:constant static-field="org.apache.ignite.events.EventType.EVT_JOB_MAPPED"/>
            </list>
        </property>

        <!-- TCP discovery SPI can be configured with list of addresses if multicast is not available. -->
        
        <property name="discoverySpi">
            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
                <property name="ipFinder">
                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
                        <property name="addresses">
                            <list>
                                <value>192.168.1.5:47500..47509</value>
                            </list>
                        </property>
                    </bean>
                </property>
            </bean>
        </property>
        
    </bean>
</beans>
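
As the header comment of this Spring file notes, a node is started by passing this file to ignite.sh or Ignition.start(). A minimal Java sketch that starts a node with this configuration and touches the same /tmp/hive path through Ignite's native IGFS API; the configuration path is illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsNodeSketch {
    public static void main(String[] args) {
        // Start a node with the Spring configuration above (path is illustrative).
        // A real accelerator node would be left running; this sketch exits after the check.
        try (Ignite ignite = Ignition.start("config/default-config.xml")) {
            // Look up the IGFS instance declared with name="igfs".
            IgniteFileSystem igfs = ignite.fileSystem("igfs");

            // Same path Hive tries to stat; create it if missing.
            IgfsPath scratchDir = new IgfsPath("/tmp/hive");

            if (!igfs.exists(scratchDir))
                igfs.mkdirs(scratchDir);

            System.out.println("Exists: " + igfs.exists(scratchDir));
        }
    }
}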
