
Calling a HelloWorld JNI function from the Hadoop WordCount example gives java.lang.UnsatisfiedLinkError

I was following the instructions in Call c function from Java to create a JNI wrapper for a C++ function, HelloWorld for example. It works fine as a standalone Java program. My programs are listed below:

//////////// 
class HelloWorld { 
    private native void myprint(char c); 

    public void from_java() { 
        myprint('I'); 
    } 

    public static void main(String[] args) { 
        new HelloWorld().from_java(); 
    } 

    static { 
        System.loadLibrary("HelloWorld"); 
    } 
} 

/* DO NOT EDIT THIS FILE - it is machine generated */ 
#include <jni.h> 
/* Header for class HelloWorld */ 

#ifndef _Included_HelloWorld 
#define _Included_HelloWorld 
#ifdef __cplusplus 
extern "C" { 
#endif 

JNIEXPORT void JNICALL Java_HelloWorld_myprint(JNIEnv *, jobject, jchar); 

#ifdef __cplusplus 
} 
#endif 
#endif 

#include <stdio.h> 
#include "HelloWorld.h" 

JNIEXPORT void JNICALL Java_HelloWorld_myprint(JNIEnv *env, jobject obj, jchar c) { 
    printf("Hello World! %c\n", (char) c); 
} 

//////////// 

myprint(..) can be called from Java without any problem.

However, when I made it part of the Hadoop WordCount example, I got an error.

My Hadoop program is:

//////////// 
import java.io.IOException; 
import java.util.StringTokenizer; 

import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapreduce.Job; 
import org.apache.hadoop.mapreduce.Mapper; 
import org.apache.hadoop.mapreduce.Reducer; 
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 


public class WordCount { 


    public static class TokenizerMapper 
            extends Mapper<Object, Text, Text, IntWritable> { 

        class HelloWorld { 
            private native void myprint(char c); 

            public void mymain() { 
                myprint('I'); 
            } 
        } 

        static { 
            System.loadLibrary("HelloWorld"); 
        } 

        private final static IntWritable one = new IntWritable(1); 
        private Text word = new Text(); 

        public void map(Object key, Text value, Context context 
                ) throws IOException, InterruptedException { 
            StringTokenizer itr = new StringTokenizer(value.toString()); 

            HelloWorld obj = new HelloWorld(); 
            obj.mymain(); 

            while (itr.hasMoreTokens()) { 
                word.set(itr.nextToken()); 
                context.write(word, one); 
            } 
        } 
    } 

    public static class IntSumReducer 
            extends Reducer<Text, IntWritable, Text, IntWritable> { 
        private IntWritable result = new IntWritable(); 

        public void reduce(Text key, Iterable<IntWritable> values, 
                Context context 
                ) throws IOException, InterruptedException { 
            int sum = 0; 
            for (IntWritable val : values) { 
                sum += val.get(); 
            } 
            result.set(sum); 
            context.write(key, result); 
        } 
    } 

    public static void main(String[] args) throws Exception { 
        Configuration conf = new Configuration(); 

        Job job = Job.getInstance(conf, "word count"); 
        job.setJarByClass(WordCount.class); 
        job.setMapperClass(TokenizerMapper.class); 
        job.setCombinerClass(IntSumReducer.class); 
        job.setReducerClass(IntSumReducer.class); 
        job.setOutputKeyClass(Text.class); 
        job.setOutputValueClass(IntWritable.class); 
        FileInputFormat.addInputPath(job, new Path(args[0])); 
        FileOutputFormat.setOutputPath(job, new Path(args[1])); 

        System.exit(job.waitForCompletion(true) ? 0 : 1); 
    } 
} 
//////////// 

When I run it, I get the following error:

17/09/02 09:36:57 INFO mapreduce.Job: Job job_local737469568_0001 failed with state FAILED due to: NA 
17/09/02 09:36:57 WARN mapred.LocalJobRunner: job_local737469568_0001 
java.lang.Exception: java.lang.UnsatisfiedLinkError:  WordCount$TokenizerMapper$HelloWorld.myprint(C)V 
at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:489) 
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:549) 
Caused by: java.lang.UnsatisfiedLinkError: WordCount$TokenizerMapper$HelloWorld.myprint(C)V 
at WordCount$TokenizerMapper$HelloWorld.myprint(Native Method) 
at WordCount$TokenizerMapper$HelloWorld.mymain(WordCount.java:26) 
at WordCount$TokenizerMapper.map(WordCount.java:42) 
at WordCount$TokenizerMapper.map(WordCount.java:18) 
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146) 
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787) 
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) 
at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:270) 
I was able to resolve the problem. It had to do with the fully qualified name of the called function. I had generated HelloWorld.h/.cpp separately from the actual Hadoop program, so the fully qualified name did not match. Once I fixed that, the problem was resolved. –

If that is the case, please post it as an answer to this question and mark it as resolved –

@AbRan you should write your solution as an answer and accept it – Sergey

Answer

//// Need to do following 
//// javah -jni WordCount 
//// This will generate WordCount_TokenizerMapper_HelloWorld.h as follows 

/* DO NOT EDIT THIS FILE - it is machine generated */ 
#include <jni.h> 
/* Header for class WordCount_TokenizerMapper_HelloWorld */ 

#ifndef _Included_WordCount_TokenizerMapper_HelloWorld 
#define _Included_WordCount_TokenizerMapper_HelloWorld 
#ifdef __cplusplus 
extern "C" { 
#endif 
/* 
* Class:  WordCount_TokenizerMapper_HelloWorld 
* Method: myprint 
* Signature: (C)V 
*/ 

//// Note the function signature here. This is how JNI expects to find 
//// this function in the .so created for C++ code. 
//// BOTTOMLINE: Call javah on the actual Java src file from which you 
////    want to call C++ function. DON'T DO IT SEPARATELY. 
JNIEXPORT void JNICALL Java_WordCount_00024TokenizerMapper_00024HelloWorld_myprint(JNIEnv *, jobject, jchar); 

#ifdef __cplusplus 
} 
#endif 
#endif
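
For completeness, below is a minimal sketch of a matching implementation file. The file name, build command, and library name are assumptions carried over from the original HelloWorld setup, not part of the answer itself.

//// WordCount_TokenizerMapper_HelloWorld.cpp (file name is illustrative) 
//// Build, for example, with: 
//// g++ -shared -fPIC -I"$JAVA_HOME/include" -I"$JAVA_HOME/include/linux" \ 
////   WordCount_TokenizerMapper_HelloWorld.cpp -o libHelloWorld.so 

#include <stdio.h> 
#include "WordCount_TokenizerMapper_HelloWorld.h" 

// The exported symbol must match the javah-generated header exactly: 
// each '$' in the nested class name WordCount$TokenizerMapper$HelloWorld 
// is escaped as _00024 in the native function name. 
JNIEXPORT void JNICALL Java_WordCount_00024TokenizerMapper_00024HelloWorld_myprint(JNIEnv *env, jobject obj, jchar c) { 
    printf("Hello World! %c\n", (char) c); 
} 

The shared library also has to be visible to the JVM that runs the map task: with System.loadLibrary("HelloWorld") in the Mapper's static block, libHelloWorld.so must be on that process's java.library.path.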