I hope this version of the post attracts some attention.

Hadoop version: 0.15.0
JDK version: Sun JDK 6.0.3
Platform: Ubuntu 7.10
IDE: Eclipse 3.2
Code:
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HadoopWrite {

    public static void main(String[] args) throws IOException {
        // Reads hadoop-default.xml and hadoop-site.xml from the classpath
        Configuration dfsconf = new Configuration();
        FileSystem dfs = FileSystem.get(dfsconf);

        // Copy the local file /nutch/out into HDFS as ryan/test
        Path inFile = new Path("/nutch/out");
        Path outFile = new Path("ryan/test");
        dfs.copyFromLocalFile(inFile, outFile);
    }
}

The exception is below:

Exception in thread "main" org.apache.hadoop.ipc.RemoteException: java.io.IOException: DIR* NameSystem.startFile: Unable to add file to namespace.
    at org.apache.hadoop.dfs.FSNamesystem.startFileInternal(FSNamesystem.java:931)
    at org.apache.hadoop.dfs.FSNamesystem.startFile(FSNamesystem.java:806)
    at org.apache.hadoop.dfs.NameNode.create(NameNode.java:276)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    at java.lang.reflect.Method.invoke(Method.java:597)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:379)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:596)

    at org.apache.hadoop.ipc.Client.call(Client.java:482)
    at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:184)
    at org.apache.hadoop.dfs.$Proxy0.create(Unknown Source)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    at java.lang.reflect.Method.invoke(Method.java:597)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
    at org.apache.hadoop.dfs.$Proxy0.create(Unknown Source)
    at org.apache.hadoop.dfs.DFSClient$DFSOutputStream.<init>(DFSClient.java:1432)
    at org.apache.hadoop.dfs.DFSClient.create(DFSClient.java:376)
    at org.apache.hadoop.dfs.DistributedFileSystem.create(DistributedFileSystem.java:121)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:353)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:260)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:139)
    at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:826)
    at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:814)
    at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:795)
    at edu.insun.HadoopWrite.main(HadoopWrite.java:20)
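In case it helps narrow down the "Unable to add file to namespace" error, here is a minimal debugging sketch (HadoopWriteDebug and the getClass/exists/mkdirs checks are my own additions, not a known fix) that prints which FileSystem the Configuration resolves to and whether the destination ryan/test or its parent already exists before the copy is attempted:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HadoopWriteDebug {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem dfs = FileSystem.get(conf);

        // Which FileSystem implementation did the client actually get?
        System.out.println("FileSystem class: " + dfs.getClass().getName());

        // Does anything already sit at the destination or its parent?
        Path outFile = new Path("ryan/test");
        Path parent = new Path("ryan");
        System.out.println("exists(ryan/test) = " + dfs.exists(outFile));
        System.out.println("exists(ryan)      = " + dfs.exists(parent));

        // Try creating the parent directory explicitly before the copy.
        System.out.println("mkdirs(ryan)      = " + dfs.mkdirs(parent));
    }
}

Since the RemoteException comes back from the namenode, the client does seem to be reaching HDFS, so the exists/mkdirs output is probably the interesting part.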

hadoop-site.xml:

> <configuration>
>
> <property>
>   <name>fs.default.name</name>
>   <value>hdfs://node01:9000</value>
>   <description>
>     The name of the default file system. Either the literal string
>     "local" or a host:port for NDFS.
>   </description>
> </property>
>
> <property>
>   <name>mapred.job.tracker</name>
>   <value>node01:9001</value>
>   <description>
>     The host and port that the MapReduce job tracker runs at. If
>     "local", then jobs are run in-process as a single map and
>     reduce task.
>   </description>
> </property>
>
> <property>
>   <name>mapred.map.tasks</name>
>   <value>4</value>
>   <description>
>     Define mapred.map.tasks to be the number of slave hosts.
>   </description>
> </property>
>
> <property>
>   <name>mapred.reduce.tasks</name>
>   <value>4</value>
>   <description>
>     Define mapred.reduce.tasks to be the number of slave hosts.
>   </description>
> </property>
>
> <property>
>   <name>dfs.name.dir</name>
>   <value>/nutch/hdfs/name</value>
> </property>
>
> <property>
>   <name>dfs.data.dir</name>
>   <value>/nutch/hdfs/data</value>
> </property>
>
> <property>
>   <name>mapred.system.dir</name>
>   <value>/nutch/hdfs/mapreduce/system</value>
> </property>
>
> <property>
>   <name>mapred.local.dir</name>
>   <value>/nutch/hdfs/mapreduce/local</value>
> </property>
>
> <property>
>   <name>dfs.replication</name>
>   <value>1</value>
> </property>
>
> </configuration>
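As a side note on the hadoop-site.xml above: a quick way to confirm that Eclipse actually has this file on the classpath is to print the resolved values from a Configuration. The ConfCheck class below is just an illustrative sketch of mine, not part of the original program:

import org.apache.hadoop.conf.Configuration;

public class ConfCheck {

    public static void main(String[] args) {
        // Configuration() loads hadoop-default.xml and hadoop-site.xml from the classpath.
        Configuration conf = new Configuration();

        // These should print hdfs://node01:9000 and 1; if the built-in defaults come
        // back instead, the directory holding hadoop-site.xml is not on the classpath.
        System.out.println("fs.default.name = " + conf.get("fs.default.name"));
        System.out.println("dfs.replication = " + conf.get("dfs.replication"));
    }
}

If the defaults come back, adding the Hadoop conf directory to the Eclipse build path (or copying hadoop-site.xml next to the compiled classes) should make the client talk to hdfs://node01:9000 instead of the local filesystem.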
>
>
>
> On Nov 30, 2007 10:57 PM, Arun C Murthy < [EMAIL PROTECTED]> wrote:
>
> > Ryan,
> >
> > On Fri, Nov 30, 2007 at 10:48:30PM +0800, Ryan Wang wrote:
> > >Hi,
> > >I can communicate with the file system via shell commands, and it worked
> > >correctly.
> > >But when I try to write a program to write a file to the file system, it
> > >failed.
> > >
> >
> > Could you provide more info on the errors, your configuration,
> > hadoop-version etc.?
> >
> > http://wiki.apache.org/lucene-hadoop/Help
> >
> > Arun
> > >public class HadoopDFSFileReadWrite {
> > >
> > >
> > >    public static void main(String[] argv) throws IOException {
> > >
> > >        Configuration dfsconf = new Configuration();
> > >        FileSystem dfs = FileSystem.get(dfsconf);
> > >
> > >        Path inFile = new Path(argv[0]);
> > >        Path outFile = new Path(argv[1]);
> > >
> > >        dfs.copyFromLocalFile(inFile, outFile);
> > >    }
> > >}
> > >
> > >argv[0]=nutch/search/bin/javalibTest.tar.gz argv[1]=ryan/test.tar.gz
> > >The program writes javalibTest.tar.gz into the project's
> > >directory as ryan/test.tar.gz instead of into HDFS.
> > >I also placed the modified hadoop-site.xml on the project's
> > >path.
> > >I don't know why; could anyone help me out?
> > >
> > >Thanks
> > >Ryan
> >
>
>
