194、Spark 2.0之Dataset开发详解-typed操作
作者: ZFH__ZJ | 发表于 2019-02-12 09:29,被阅读0次
代码
object TypedOperation {

  // Schema for employee.json. Numeric fields are Long because Spark's JSON
  // reader infers integral JSON numbers as LongType; a mismatched type here
  // would make `as[Employee]` fail at runtime.
  case class Employee(name: String, age: Long, depId: Long, gender: String, salary: Long)

  /**
   * Demonstrates a typed Dataset operation: loads employee.json from the
   * classpath, converts the untyped DataFrame to Dataset[Employee], and
   * prints the rows sorted by salary (descending) then age (ascending).
   */
  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession
      .builder()
      .appName("BasicOperation")
      .master("local")
      .getOrCreate()

    try {
      // Required for the implicit Encoder[Employee] used by `as[Employee]`.
      import sparkSession.implicits._

      // NOTE(review): getResource returns null if employee.json is missing
      // from the classpath, which would surface as an NPE here — confirm the
      // resource is bundled with the test/run classpath.
      val employeePath = this.getClass.getClassLoader.getResource("employee.json").getPath
      val employeeDF = sparkSession.read.json(employeePath)
      val employeeDS = employeeDF.as[Employee]

      // Highest salary first; ties broken by youngest age first.
      employeeDS.sort(employeeDS("salary").desc, employeeDS("age").asc).show()
    } finally {
      // Fix: the original never stopped the session, leaking the local
      // SparkContext and its resources. Stop it even if the job fails.
      sparkSession.stop()
    }
  }
}
本文标题:194、Spark 2.0之Dataset开发详解-typed操作
本文链接:https://www.haomeiwen.com/subject/tnkqeqtx.html
网友评论