AWS
Spark

Save JSON to AWS S3 via a Spark DataFrame

spark.sparkContext.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", "[access key id]")
spark.sparkContext.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", "[secrete access key]")

// Write the DataFrame as JSON, overwriting any existing data at the target path
val bucketName = "[bucket name]"
val s3path = s"s3n://$bucketName/some_directory/data.json"
df.write.mode("overwrite").json(s3path)
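
Note: on newer Hadoop/Spark builds the s3a connector is generally preferred, since s3n is deprecated. A minimal sketch of the same write using s3a, assuming the hadoop-aws package (and its AWS SDK dependency) is on the classpath:

// s3a uses different configuration keys than s3n
spark.sparkContext.hadoopConfiguration.set("fs.s3a.access.key", "[access key id]")
spark.sparkContext.hadoopConfiguration.set("fs.s3a.secret.key", "[secret access key]")

// Same write, but against an s3a:// path
val s3aPath = s"s3a://$bucketName/some_directory/data.json"
df.write.mode("overwrite").json(s3aPath)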