
HelloWorld JNI function, created following the instructions in "Call c function from Java", gives java.lang.UnsatisfiedLinkError when called from the Hadoop word-count example

I created a JNI HelloWorld example for a C++ function. It works fine as part of a plain Java program. My program is as follows:

//////////// 
class HelloWorld { 
    private native void myprint(char c); 

    public void from_java() { 
        myprint('I'); 
    } 

    public static void main(String[] args) { 
        new HelloWorld().from_java(); 
    } 

    static { 
        System.loadLibrary("HelloWorld"); 
    } 
} 

/* DO NOT EDIT THIS FILE - it is machine generated */ 
#include <jni.h> 
/* Header for class HelloWorld */ 

#ifndef _Included_HelloWorld 
#define _Included_HelloWorld 
#ifdef __cplusplus 
extern "C" { 
#endif 

JNIEXPORT void JNICALL Java_HelloWorld_myprint(JNIEnv *, jobject, jchar); 

#ifdef __cplusplus 
} 
#endif 
#endif 

#include <stdio.h> 
#include "HelloWorld.h" 

JNIEXPORT void JNICALL Java_HelloWorld_myprint(JNIEnv *env, jobject obj, jchar c) { 
    /* jchar is a 16-bit unsigned type; cast for printf's %c */ 
    printf("Hello World! %c\n", (char) c); 
} 

//////////// 

myprint(..) can be called from Java just fine.
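
For reference, here is roughly how the standalone example can be built and run. The compiler, flags, and the Linux include directory are assumptions about the setup, not part of the original question:

//////////// 
//// Presumed build steps for the standalone example (Linux assumed): 
//// javac HelloWorld.java 
//// javah -jni HelloWorld                  <-- generates HelloWorld.h 
//// g++ -shared -fPIC -I"$JAVA_HOME/include" -I"$JAVA_HOME/include/linux" 
////     HelloWorld.cpp -o libHelloWorld.so 
//// java -Djava.library.path=. HelloWorld 
//////////// 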

However, when I make it part of the WordCount Hadoop example, I get an error.

My Hadoop program is:

//////////// 
import java.io.IOException; 
import java.util.StringTokenizer; 

import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapreduce.Job; 
import org.apache.hadoop.mapreduce.Mapper; 
import org.apache.hadoop.mapreduce.Reducer; 
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 


public class WordCount { 

    public static class TokenizerMapper 
            extends Mapper<Object, Text, Text, IntWritable> { 

        class HelloWorld { 

            private native void myprint(char c); 

            public void mymain() { 
                myprint('I'); 
            } 
        } 

        static { 
            System.loadLibrary("HelloWorld"); 
        } 

        private final static IntWritable one = new IntWritable(1); 
        private Text word = new Text(); 

        public void map(Object key, Text value, Context context 
                ) throws IOException, InterruptedException { 
            StringTokenizer itr = new StringTokenizer(value.toString()); 

            HelloWorld obj = new HelloWorld(); 
            obj.mymain(); 

            while (itr.hasMoreTokens()) { 
                word.set(itr.nextToken()); 
                context.write(word, one); 
            } 
        } 
    } 

    public static class IntSumReducer 
            extends Reducer<Text, IntWritable, Text, IntWritable> { 
        private IntWritable result = new IntWritable(); 

        public void reduce(Text key, Iterable<IntWritable> values, 
                Context context 
                ) throws IOException, InterruptedException { 
            int sum = 0; 
            for (IntWritable val : values) { 
                sum += val.get(); 
            } 
            result.set(sum); 
            context.write(key, result); 
        } 
    } 

    public static void main(String[] args) throws Exception { 

        Configuration conf = new Configuration(); 

        Job job = Job.getInstance(conf, "word count"); 
        job.setJarByClass(WordCount.class); 
        job.setMapperClass(TokenizerMapper.class); 
        job.setCombinerClass(IntSumReducer.class); 
        job.setReducerClass(IntSumReducer.class); 
        job.setOutputKeyClass(Text.class); 
        job.setOutputValueClass(IntWritable.class); 
        FileInputFormat.addInputPath(job, new Path(args[0])); 
        FileOutputFormat.setOutputPath(job, new Path(args[1])); 

        System.exit(job.waitForCompletion(true) ? 0 : 1); 
    } 
} 
//////////// 

When I run it, I get the following error:

17/09/02 09:36:57 INFO mapreduce.Job: Job job_local737469568_0001 failed with state FAILED due to: NA 
17/09/02 09:36:57 WARN mapred.LocalJobRunner: job_local737469568_0001 
java.lang.Exception: java.lang.UnsatisfiedLinkError:  WordCount$TokenizerMapper$HelloWorld.myprint(C)V 
at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:489) 
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:549) 
Caused by: java.lang.UnsatisfiedLinkError: WordCount$TokenizerMapper$HelloWorld.myprint(C)V 
at WordCount$TokenizerMapper$HelloWorld.myprint(Native Method) 
at WordCount$TokenizerMapper$HelloWorld.mymain(WordCount.java:26) 
at WordCount$TokenizerMapper.map(WordCount.java:42) 
at WordCount$TokenizerMapper.map(WordCount.java:18) 
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146) 
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787) 
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) 
at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:270) 

I was able to solve this. It has to do with the fully qualified name of the called function. I had created HelloWorld.h/.cpp separately from the actual Hadoop program, so the fully qualified names did not match. Once I fixed that, the problem was solved. –


If that is the case, please post it as an answer to this question and mark it as solved –


@AbRan You need to write up your solution as an answer and accept it – Sergey

Answer

//// You need to do the following: 
//// javah -jni WordCount 
//// This generates WordCount_TokenizerMapper_HelloWorld.h as follows: 

/* DO NOT EDIT THIS FILE - it is machine generated */ 
#include <jni.h> 
/* Header for class WordCount_TokenizerMapper_HelloWorld */ 

#ifndef _Included_WordCount_TokenizerMapper_HelloWorld 
#define _Included_WordCount_TokenizerMapper_HelloWorld 
#ifdef __cplusplus 
extern "C" { 
#endif 
/* 
* Class:  WordCount_TokenizerMapper_HelloWorld 
* Method: myprint 
* Signature: (C)V 
*/ 

//// Note the function signature here. This is how JNI expects to find 
//// this function in the .so built from the C++ code: each '$' in the 
//// inner-class binary name WordCount$TokenizerMapper$HelloWorld is 
//// escaped as _00024 in the exported symbol name. 
//// BOTTOM LINE: run javah on the actual Java source file from which you 
////    want to call the C++ function. DON'T generate the header separately. 
JNIEXPORT void JNICALL Java_WordCount_00024TokenizerMapper_00024HelloWorld_myprint(JNIEnv *, jobject, jchar); 

#ifdef __cplusplus 
} 
#endif 
#endif
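
For completeness, the matching implementation would then look roughly like this. It is a minimal sketch that reuses the printf body from the original example; only the exported symbol name and the jchar parameter type change:

#include <stdio.h> 
#include "WordCount_TokenizerMapper_HelloWorld.h" 

//// Same body as the standalone example; only the symbol name differs, 
//// matching the generated header above. 
JNIEXPORT void JNICALL Java_WordCount_00024TokenizerMapper_00024HelloWorld_myprint 
    (JNIEnv *env, jobject obj, jchar c) { 
    /* jchar is a 16-bit unsigned type; cast for printf's %c */ 
    printf("Hello World! %c\n", (char) c); 
} 

Rebuild libHelloWorld.so from this file; the library file keeps the same name, since System.loadLibrary("HelloWorld") only resolves the file name, while the lookup inside it uses the mangled function name above.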