final class DataFrameWriterV2[T] extends CreateTableWriter[T]
Interface used to write a org.apache.spark.sql.Dataset to external storage using the v2 API.
- Annotations
 - @Experimental()
 - Source
 - DataFrameWriterV2.scala
 - Since
 3.0.0
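
For orientation, a minimal usage sketch: a DataFrameWriterV2 is obtained from Dataset.writeTo. The catalog and table names below are hypothetical.

```scala
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().getOrCreate()
import spark.implicits._

val df = Seq((1L, "a"), (2L, "b")).toDF("id", "value")

// Dataset.writeTo(...) returns a DataFrameWriterV2 for the named table.
df.writeTo("catalog.db.events")
  .using("parquet")
  .createOrReplace()
```
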
- Inheritance
 - DataFrameWriterV2
 - CreateTableWriter
 - WriteConfigMethods
 - AnyRef
 - Any
 
Value Members
- final def !=(arg0: Any): Boolean
 - Definition Classes
  - AnyRef → Any
 
- final def ##(): Int
 - Definition Classes
  - AnyRef → Any
 
- final def ==(arg0: Any): Boolean
 - Definition Classes
  - AnyRef → Any
 
- def append(): Unit
 Append the contents of the data frame to the output table.
 If the output table does not exist, this operation will fail with org.apache.spark.sql.catalyst.analysis.NoSuchTableException. The data frame will be validated to ensure it is compatible with the existing table.
 - Annotations
  - @throws( classOf[NoSuchTableException] )
 - Exceptions thrown
  - org.apache.spark.sql.catalyst.analysis.NoSuchTableException if the table does not exist
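
A minimal sketch of an append, assuming the (hypothetical) target table already exists:

```scala
// Appends the frame's rows to an existing v2 table; throws
// NoSuchTableException if catalog.db.events has not been created yet.
df.writeTo("catalog.db.events").append()
```
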
- final def asInstanceOf[T0]: T0
 - Definition Classes
  - Any
 
- def clone(): AnyRef
 - Attributes
  - protected[lang]
 - Definition Classes
  - AnyRef
 - Annotations
  - @throws( ... ) @native()
 
- def create(): Unit
 Create a new table from the contents of the data frame.
 The new table's schema, partition layout, properties, and other configuration will be based on the configuration set on this writer.
 If the output table exists, this operation will fail with org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException.
 - Definition Classes
  - DataFrameWriterV2 → CreateTableWriter
 - Exceptions thrown
  - org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException if the table already exists
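
A minimal sketch, assuming the (hypothetical) target table does not exist yet:

```scala
// Creates catalog.db.events from the frame's schema; throws
// TableAlreadyExistsException if the table is already present.
df.writeTo("catalog.db.events")
  .using("parquet")
  .create()
```
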
- def createOrReplace(): Unit
 Create a new table or replace an existing table with the contents of the data frame.
 The output table's schema, partition layout, properties, and other configuration will be based on the contents of the data frame and the configuration set on this writer. If the table exists, its configuration and data will be replaced.
 - Definition Classes
  - DataFrameWriterV2 → CreateTableWriter
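
A minimal sketch of idempotently (re)materializing a derived table; the table names are hypothetical:

```scala
// Safe to re-run: creates the table on the first run, replaces its
// definition and contents on later runs.
spark.table("catalog.db.events")
  .groupBy("id")
  .count()
  .writeTo("catalog.db.event_counts")
  .createOrReplace()
```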
 
- final def eq(arg0: AnyRef): Boolean
 - Definition Classes
  - AnyRef
 
- def equals(arg0: Any): Boolean
 - Definition Classes
  - AnyRef → Any
 
- def finalize(): Unit
 - Attributes
  - protected[lang]
 - Definition Classes
  - AnyRef
 - Annotations
  - @throws( classOf[java.lang.Throwable] )
 
- final def getClass(): Class[_]
 - Definition Classes
  - AnyRef → Any
 - Annotations
  - @native()
 
- def hashCode(): Int
 - Definition Classes
  - AnyRef → Any
 - Annotations
  - @native()
 
- final def isInstanceOf[T0]: Boolean
 - Definition Classes
  - Any
 
- final def ne(arg0: AnyRef): Boolean
 - Definition Classes
  - AnyRef
 
- final def notify(): Unit
 - Definition Classes
  - AnyRef
 - Annotations
  - @native()
 
- final def notifyAll(): Unit
 - Definition Classes
  - AnyRef
 - Annotations
  - @native()
 
- def option(key: String, value: String): DataFrameWriterV2[T]
 Add a write option.
 - Definition Classes
  - DataFrameWriterV2 → WriteConfigMethods
 - Since
  - 3.0.0
- def option(key: String, value: Double): CreateTableWriter[T]
 Add a double output option.
 - Definition Classes
  - WriteConfigMethods
 - Since
  - 3.0.0
- def option(key: String, value: Long): CreateTableWriter[T]
 Add a long output option.
 - Definition Classes
  - WriteConfigMethods
 - Since
  - 3.0.0
- def option(key: String, value: Boolean): CreateTableWriter[T]
 Add a boolean output option.
 - Definition Classes
  - WriteConfigMethods
 - Since
  - 3.0.0
- def options(options: scala.collection.Map[String, String]): DataFrameWriterV2[T]
 - Definition Classes
  - DataFrameWriterV2 → WriteConfigMethods
 
- def options(options: java.util.Map[String, String]): DataFrameWriterV2[T]
 - Definition Classes
  - DataFrameWriterV2 → WriteConfigMethods
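
A minimal sketch of setting write options. Which keys are honored depends on the underlying data source; the keys shown here are illustrative assumptions, not a fixed Spark-level contract.

```scala
// Options are passed through to the v2 data source.
df.writeTo("catalog.db.events")
  .option("compression", "zstd")          // assumed source-specific key
  .options(Map("mergeSchema" -> "true"))  // assumed source-specific key
  .append()
```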
 
- def overwrite(condition: Column): Unit
 Overwrite rows matching the given filter condition with the contents of the data frame in the output table.
 If the output table does not exist, this operation will fail with org.apache.spark.sql.catalyst.analysis.NoSuchTableException. The data frame will be validated to ensure it is compatible with the existing table.
 - Annotations
  - @throws( classOf[NoSuchTableException] )
 - Exceptions thrown
  - org.apache.spark.sql.catalyst.analysis.NoSuchTableException if the table does not exist
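
A minimal sketch: replace one day's rows with the frame's contents. The column and table names are hypothetical.

```scala
import org.apache.spark.sql.functions.col

// Rows matching the condition are removed, then the frame is appended.
df.writeTo("catalog.db.events")
  .overwrite(col("day") === "2019-06-01")
```
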
- def overwritePartitions(): Unit
 Overwrite all partitions for which the data frame contains at least one row with the contents of the data frame in the output table.
 This operation is equivalent to Hive's INSERT OVERWRITE ... PARTITION, which replaces partitions dynamically depending on the contents of the data frame.
 If the output table does not exist, this operation will fail with org.apache.spark.sql.catalyst.analysis.NoSuchTableException. The data frame will be validated to ensure it is compatible with the existing table.
 - Annotations
  - @throws( classOf[NoSuchTableException] )
 - Exceptions thrown
  - org.apache.spark.sql.catalyst.analysis.NoSuchTableException if the table does not exist
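
A minimal sketch of a dynamic partition overwrite; the table name is hypothetical:

```scala
// Only partitions for which df contains at least one row are replaced;
// all other partitions of catalog.db.events are left untouched.
df.writeTo("catalog.db.events").overwritePartitions()
```
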
- def partitionedBy(column: Column, columns: Column*): CreateTableWriter[T]
 Partition the output table created by create, createOrReplace, or replace using the given columns or transforms.
 When specified, the table data will be stored by these values for efficient reads.
 For example, when a table is partitioned by day, it may be stored in a directory layout like:
  - table/day=2019-06-01/
  - table/day=2019-06-02/
 Partitioning is one of the most widely used techniques to optimize physical data layout. It provides a coarse-grained index for skipping unnecessary data reads when queries have predicates on the partitioned columns. In order for partitioning to work well, the number of distinct values in each column should typically be less than tens of thousands.
 - Definition Classes
  - DataFrameWriterV2 → CreateTableWriter
 - Annotations
  - @varargs()
 - Since
  - 3.0.0
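
A minimal sketch using the partition transform helpers in org.apache.spark.sql.functions (days, bucket); the column and table names are hypothetical:

```scala
import org.apache.spark.sql.functions.{bucket, col, days}

// Partition by the day of a timestamp column and hash-bucket the ids.
df.writeTo("catalog.db.events")
  .using("parquet")
  .partitionedBy(days(col("ts")), bucket(16, col("id")))
  .create()
```
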
- def replace(): Unit
 Replace an existing table with the contents of the data frame.
 The existing table's schema, partition layout, properties, and other configuration will be replaced with the contents of the data frame and the configuration set on this writer.
 If the output table does not exist, this operation will fail with org.apache.spark.sql.catalyst.analysis.CannotReplaceMissingTableException.
 - Definition Classes
  - DataFrameWriterV2 → CreateTableWriter
 - Exceptions thrown
  - org.apache.spark.sql.catalyst.analysis.CannotReplaceMissingTableException if the table does not exist
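
A minimal sketch; unlike createOrReplace, this fails if the (hypothetical) table is missing:

```scala
// Throws CannotReplaceMissingTableException if catalog.db.events
// does not already exist.
df.writeTo("catalog.db.events")
  .using("parquet")
  .replace()
```
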
- final def synchronized[T0](arg0: ⇒ T0): T0
 - Definition Classes
  - AnyRef
 
- def tableProperty(property: String, value: String): CreateTableWriter[T]
 Add a table property.
 - Definition Classes
  - DataFrameWriterV2 → CreateTableWriter
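
A minimal sketch; the property key and value are hypothetical and are interpreted by the target catalog rather than by Spark itself:

```scala
// "owner" is an illustrative property key, not a reserved Spark key.
df.writeTo("catalog.db.events")
  .using("parquet")
  .tableProperty("owner", "data-eng")
  .create()
```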
 
- def toString(): String
 - Definition Classes
  - AnyRef → Any
 
- def using(provider: String): CreateTableWriter[T]
 Specifies a provider for the underlying output data source. Spark's default catalog supports "parquet", "json", etc.
 - Definition Classes
  - DataFrameWriterV2 → CreateTableWriter
 - Since
  - 3.0.0
- final def wait(): Unit
 - Definition Classes
  - AnyRef
 - Annotations
  - @throws( ... )
 
- final def wait(arg0: Long, arg1: Int): Unit
 - Definition Classes
  - AnyRef
 - Annotations
  - @throws( ... )
 
- final def wait(arg0: Long): Unit
 - Definition Classes
  - AnyRef
 - Annotations
  - @throws( ... ) @native()