196、Spark 2.0之Dataset开发详解-untyped（无类型操作）
作者:
ZFH__ZJ | 来源:发表于
2019-02-12 12:36 被阅读0次
代码
/**
 * Spark 2.0 Dataset development — untyped operations.
 *
 * Reads employee/department JSON resources into typed Datasets, then runs an
 * untyped (DataFrame-style) pipeline: filter -> join -> groupBy -> agg -> select.
 * Column references (`$"..."`, `ds("...")`) are not checked at compile time,
 * which is what "untyped" refers to here.
 */
object UntypedOperation {

  /** Row schema for employee.json. */
  final case class Employee(name: String, age: Long, depId: Long, gender: String, salary: Long)

  /** Row schema for department.json. */
  final case class Department(id: Long, name: String)

  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession
      .builder()
      // Fixed: was "BasicOperation", a copy-paste leftover from a sibling example.
      .appName("UntypedOperation")
      .master("local")
      .getOrCreate()

    // Ensure the local Spark context is released even if the job throws.
    try {
      import sparkSession.implicits._
      import org.apache.spark.sql.functions._

      // getResource returns null when the file is absent from the classpath;
      // fail fast with a clear message instead of an opaque NullPointerException.
      def resourcePath(name: String): String =
        Option(this.getClass.getClassLoader.getResource(name))
          .map(_.getPath)
          .getOrElse(sys.error(s"resource not found on classpath: $name"))

      val employeeDS = sparkSession.read.json(resourcePath("employee.json")).as[Employee]
      val departmentDS = sparkSession.read.json(resourcePath("department.json")).as[Department]

      // Untyped pipeline: average salary per department for employees over 20.
      employeeDS
        .where("age > 20")
        .join(departmentDS, $"depId" === $"id")
        .groupBy(employeeDS("depId"))
        .agg(avg(employeeDS("salary")))
        .select("avg(salary)")
        .show()
    } finally {
      sparkSession.stop()
    }
  }
}
本文标题:196、Spark 2.0之Dataset开发详解-untyped（无类型操作）
本文链接:https://www.haomeiwen.com/subject/cnkqeqtx.html
网友评论