Registered tables are not cached in memory.
The registerTempTable (createOrReplaceTempView in Spark 2.+) method simply creates or replaces a view of the given DataFrame with a given query plan. It does not persist anything to the SQL metastore, nor does it cache any data.
If you want the data of a registered DataFrame to be cached, you have to do it explicitly:
df.createOrReplaceTempView("my_table") // df.registerTempTable("my_table") for Spark < 2.0
spark.cacheTable("my_table")
EDIT:
Let's illustrate the difference with an example.
First, registering the view and then caching it with cacheTable:
scala> val df = Seq(("1",2),("b",3)).toDF
// df: org.apache.spark.sql.DataFrame = [_1: string, _2: int]
scala> sc.getPersistentRDDs
// res0: scala.collection.Map[Int,org.apache.spark.rdd.RDD[_]] = Map()
scala> df.createOrReplaceTempView("my_table")
scala> sc.getPersistentRDDs
// res2: scala.collection.Map[Int,org.apache.spark.rdd.RDD[_]] = Map()
scala> spark.catalog.cacheTable("my_table") // spark.cacheTable("...") before spark 2.0
scala> sc.getPersistentRDDs
// res4: scala.collection.Map[Int,org.apache.spark.rdd.RDD[_]] = Map(2 -> In-memory table my_table MapPartitionsRDD[2] at cacheTable at <console>:26)
And now the same using cache.registerTempTable (cache.createOrReplaceTempView in Spark 2.+):
scala> sc.getPersistentRDDs
scala> val df = Seq(("1",2),("b",3)).toDF
scala> df.createOrReplaceTempView("my_table")
scala> sc.getPersistentRDDs
scala> df.cache.createOrReplaceTempView("my_table")
scala> sc.getPersistentRDDs