2. Writing Data to JDBC
import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}

object SparkSQL03_Datasource {
  def main(args: Array[String]): Unit = {
    // Create the Spark configuration object
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL01_Demo")
    // Create the SparkSession object
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    import spark.implicits._

    val rdd: RDD[User2] = spark.sparkContext.makeRDD(List(User2("lisi", 20), User2("zs", 30)))
    val ds: Dataset[User2] = rdd.toDS

    // Method 1: the generic writer, with format() specifying the output type
    ds.write
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop202:3306/test")
      .option("user", "root")
      .option("password", "123456")
      .option("dbtable", "user")
      .mode(SaveMode.Append)
      .save()

    // Method 2: the jdbc() shortcut method
    val props: Properties = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    ds.write.mode(SaveMode.Append).jdbc("jdbc:mysql://hadoop202:3306/test", "user", props)

    // Release resources
    spark.stop()
  }
}

case class User2(name: String, age: Long)
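Both methods require a MySQL JDBC driver on the classpath, since Spark does not bundle one. A minimal sketch of the build dependency, assuming sbt; the version shown is illustrative and should be matched to your MySQL server:

// build.sbt (sketch): pull in the MySQL connector so jdbc:mysql URLs resolve a driver
// The version 5.1.27 is an assumption for a MySQL 5.x server; adjust as needed.
libraryDependencies += "mysql" % "mysql-connector-java" % "5.1.27"

When submitting with spark-submit instead of running locally, the same connector jar can be supplied via --jars.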
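To verify the write, the same connection settings can be reused through the read path. A minimal read-back sketch using the standard DataFrameReader API, assuming the host, database, table, and credentials from the listing above (it would go before spark.stop()):

// Read the "user" table back and print its contents to confirm the append succeeded
val readBack = spark.read
  .format("jdbc")
  .option("url", "jdbc:mysql://hadoop202:3306/test")
  .option("user", "root")
  .option("password", "123456")
  .option("dbtable", "user")
  .load()
readBack.show()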