forked from jleetutorial/python-spark-tutorial
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathRddDataframeConversion.py
More file actions
40 lines (29 loc) · 1.27 KB
/
RddDataframeConversion.py
File metadata and controls
40 lines (29 loc) · 1.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import sys
sys.path.insert(0, '.')
from pyspark.sql import SparkSession
from commons.Utils import Utils
def mapResponseRdd(line: str):
    """Parse one survey CSV line into a 4-tuple of selected fields.

    Returns (col2, col6-as-float, col9, col14-as-float); the two numeric
    columns become None when the field is an empty string.
    """
    fields = Utils.COMMA_DELIMITER.split(line)
    first_value = float(fields[6]) if fields[6] else None
    second_value = float(fields[14]) if fields[14] else None
    return fields[2], first_value, fields[9], second_value
def getColNames(line: str):
    """Split a CSV header line and return the names at indices 2, 6, 9, 14
    (the same columns that mapResponseRdd extracts from data rows)."""
    fields = Utils.COMMA_DELIMITER.split(line)
    return [fields[i] for i in (2, 6, 9, 14)]
if __name__ == "__main__":
session = SparkSession.builder.appName("StackOverFlowSurvey").master("local[*]").getOrCreate()
sc = session.sparkContext
lines = sc.textFile("in/2016-stack-overflow-survey-responses.csv")
responseRDD = lines \
.filter(lambda line: not Utils.COMMA_DELIMITER.split(line)[2] == "country") \
.map(mapResponseRdd)
colNames = lines \
.filter(lambda line: Utils.COMMA_DELIMITER.split(line)[2] == "country") \
.map(getColNames)
responseDataFrame = responseRDD.toDF(colNames.collect()[0])
print("=== Print out schema ===")
responseDataFrame.printSchema()
print("=== Print 20 records of responses table ===")
responseDataFrame.show(20)
for response in responseDataFrame.rdd.take(10):
print(response)