Add the spark-assembly-1.1.0-hadoop2.4.0 dependency jar under File -> Project Structure… -> Libraries.
3. Write the WordCount example program under the java directory:

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class JavaWordCount {
    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) throws Exception {
        SparkConf sparkConf = new SparkConf().setAppName("JavaWordCount");
        String srcPath = null;
        String desPath = "/apps/ca/yanh/output";
        if (args.length == 1) {
            srcPath = args[0];
        } else if (args.length == 2) {
            srcPath = args[0];
            desPath = args[1];
        } else {
            System.out.println("Usage: java -jar jarName <src> [des]");
            System.exit(1);
        }

        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        JavaRDD<String> lines = jsc.textFile(srcPath, 1);

        System.out.println("Begin to split!");
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterable<String> call(String s) throws Exception {
                // Split each input line on spaces
                return Arrays.asList(SPACE.split(s));
            }
        });

        System.out.println("Begin to map!");
        JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<String, Integer>(s, 1);
            }
        });

        System.out.println("Begin to reduce!");
        JavaPairRDD<String, Integer> counts = ones.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer i1, Integer i2) throws Exception {
                return i1 + i2;
            }
        });

        System.out.println("Begin to save!");
        /* Uncomment to print the counts to the console instead of saving them:
        List<Tuple2<String, Integer>> output = counts.collect();
        for (Tuple2<String, Integer> tuple : output) {
            System.out.println(tuple._1() + ": " + tuple._2());
        }
        */
        counts.saveAsTextFile(desPath);
        jsc.stop();
    }
}
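For reference only, the same pipeline can be written much more compactly with Java 8 lambdas. This is a sketch, not part of the original setup: it assumes Spark 2.x or later, where FlatMapFunction returns an Iterator rather than an Iterable, and the class name JavaWordCountLambda is made up for illustration.

```java
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;
import java.util.regex.Pattern;

public class JavaWordCountLambda {
    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("JavaWordCountLambda");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        JavaPairRDD<String, Integer> counts = jsc.textFile(args[0], 1)
                // In Spark 2.x, flatMap expects an Iterator, hence .iterator()
                .flatMap(line -> Arrays.asList(SPACE.split(line)).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey((a, b) -> a + b);

        counts.saveAsTextFile(args[1]);
        jsc.stop();
    }
}
```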
Enter the main class (the entry-point class name), delete all the jars under Output Layout (the Spark runtime environment already provides them), and then click Apply.
Build the program: Build -> Build Artifacts…, then select the artifact to build.
The generated jar can then be found in the out directory of the current project.
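Before submitting the jar to the cluster, it can be handy to run a quick smoke test in local mode from the IDE. The following is only a minimal sketch: the class name LocalSmokeTest and the README.md path are hypothetical, and setMaster("local[2]") is for testing only and should be removed (or overridden by spark-submit) when running on the cluster.

```java
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public class LocalSmokeTest {
    public static void main(String[] args) {
        // "local[2]" runs Spark with 2 worker threads inside the JVM, no cluster needed
        SparkConf conf = new SparkConf()
                .setAppName("JavaWordCount-local")
                .setMaster("local[2]");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // Read a small local file and count its lines as a sanity check
        long lineCount = jsc.textFile("README.md").count();
        System.out.println("Lines read: " + lineCount);

        jsc.stop();
    }
}
```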
This error is caused by missing native library dependencies and the missing LZO compression jar.
The required package is provided here. The spark-submit command:
spark-submit --driver-library-path :/usr/lib/hadoop/lib/native/ --jars /usr/lib/hadoop/lib/hadoop-lzo-0.6.0.jar --class JavaWordCount ~/JavaWordCount.jar /apps/ca/yanh/data/README.md