abstract class TaskContext extends Serializable
Contextual information about a task which can be read or mutated during execution. To access the TaskContext for a running task, use:
org.apache.spark.TaskContext.get()
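For example, inside an RDD operation (a minimal sketch; `rdd` stands for any existing RDD):

```scala
import org.apache.spark.TaskContext

rdd.mapPartitions { iter =>
  // Fetch the context of the currently running task.
  val ctx = TaskContext.get()
  println(s"stage=${ctx.stageId()} partition=${ctx.partitionId()} attempt=${ctx.attemptNumber()}")
  iter
}
```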
- Source
 - TaskContext.scala
 
Linear Supertypes
- Serializable
- AnyRef
- Any
 
 
Instance Constructors
- new TaskContext()
 
Abstract Value Members
- abstract def addTaskCompletionListener(listener: TaskCompletionListener): TaskContext
  Adds a (Java friendly) listener to be executed on task completion. This will be called in all situations - success, failure, or cancellation. Adding a listener to an already completed task will result in that listener being called immediately.
  Two listeners registered in the same thread will be invoked in reverse order of registration if the task completes after both are registered. There are no ordering guarantees for listeners registered in different threads, or for listeners registered after the task completes. Listeners are guaranteed to execute sequentially.
  An example use is for HadoopRDD to register a callback to close the input stream, as in the sketch below.
  Exceptions thrown by the listener will result in failure of the task.
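A minimal sketch of the Java-friendly form, assuming it runs inside a task body and that `stream` is a resource the task opened:

```scala
import org.apache.spark.TaskContext
import org.apache.spark.util.TaskCompletionListener

val stream = new java.io.FileInputStream("/tmp/input") // hypothetical resource
TaskContext.get().addTaskCompletionListener(new TaskCompletionListener {
  // Runs whether the task succeeds, fails, or is cancelled.
  override def onTaskCompletion(context: TaskContext): Unit = stream.close()
})
```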
- abstract def addTaskFailureListener(listener: TaskFailureListener): TaskContext
  Adds a listener to be executed on task failure (which includes completion listener failure, if the task body did not already fail). Adding a listener to an already failed task will result in that listener being called immediately.
  Note: Prior to Spark 3.4.0, failure listeners were only invoked if the main task body failed.
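A minimal sketch of the Java-friendly form, assuming it runs inside a task body:

```scala
import org.apache.spark.TaskContext
import org.apache.spark.util.TaskFailureListener

TaskContext.get().addTaskFailureListener(new TaskFailureListener {
  // Invoked only when the task fails; `error` is the cause.
  override def onTaskFailure(context: TaskContext, error: Throwable): Unit =
    System.err.println(s"Task ${context.taskAttemptId()} failed: ${error.getMessage}")
})
```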
- abstract def attemptNumber(): Int
  How many times this task has been attempted. The first task attempt will be assigned attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
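For example, a task body can detect that it is a retry (a sketch; cleanupPartialOutput is a hypothetical helper):

```scala
val ctx = org.apache.spark.TaskContext.get()
if (ctx.attemptNumber() > 0) {
  // A previous attempt of this task ran and failed or was killed;
  // e.g. remove partial output it may have left behind.
  cleanupPartialOutput(ctx.partitionId()) // hypothetical helper
}
```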
- abstract def cpus(): Int
  CPUs allocated to the task.
  Annotations: @Since("3.3.0")
- abstract def getLocalProperty(key: String): String
  Get a local property set upstream in the driver, or null if it is missing. See also org.apache.spark.SparkContext.setLocalProperty.
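A minimal sketch of how a property set on the driver becomes visible in tasks ("myapp.tag" is a hypothetical key; `sc` is an existing SparkContext):

```scala
// On the driver, before submitting the job:
sc.setLocalProperty("myapp.tag", "nightly-run")

// Inside a task launched by that job:
val tag = org.apache.spark.TaskContext.get().getLocalProperty("myapp.tag")
// `tag` is "nightly-run" here, or null if the property was never set.
```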
- abstract def getMetricsSources(sourceName: String): Seq[Source]
  ::DeveloperApi:: Returns all metrics sources with the given name which are associated with the instance which runs the task. For more information see org.apache.spark.metrics.MetricsSystem.
  Annotations: @DeveloperApi()
- abstract def isCompleted(): Boolean
  Returns true if the task has completed.
- abstract def isInterrupted(): Boolean
  Returns true if the task has been killed.
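Long-running task bodies can poll this flag to exit early once the task is killed (a sketch; hasMoreWork and processNext are hypothetical):

```scala
val ctx = org.apache.spark.TaskContext.get()
// Cooperative cancellation: stop processing once the task has been killed.
while (hasMoreWork && !ctx.isInterrupted()) {
  processNext()
}
```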
- abstract def numPartitions(): Int
  Total number of partitions in the stage that this task belongs to.
- abstract def partitionId(): Int
  The ID of the RDD partition that is computed by this task.
- abstract def resources(): Map[String, ResourceInformation]
  Resources allocated to the task. The key is the resource name and the value is information about the resource. Please refer to org.apache.spark.resource.ResourceInformation for specifics.
  Annotations: @Evolving()
- abstract def resourcesJMap(): java.util.Map[String, ResourceInformation]
  (java-specific) Resources allocated to the task. The key is the resource name and the value is information about the resource. Please refer to org.apache.spark.resource.ResourceInformation for specifics.
  Annotations: @Evolving()
- abstract def stageAttemptNumber(): Int
  How many times the stage that this task belongs to has been attempted. The first stage attempt will be assigned stageAttemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
- abstract def stageId(): Int
  The ID of the stage that this task belongs to.
- abstract def taskAttemptId(): Long
  An ID that is unique to this task attempt (within the same SparkContext, no two task attempts will share the same attempt ID). This is roughly equivalent to Hadoop's TaskAttemptID.
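These identifiers compose into names that are unique per attempt, e.g. for scratch space (a hypothetical path layout):

```scala
val ctx = org.apache.spark.TaskContext.get()
// taskAttemptId() is unique within the SparkContext, so concurrent or retried
// attempts of the same partition never collide on this path.
val scratchDir =
  s"/tmp/myapp/stage-${ctx.stageId()}-${ctx.stageAttemptNumber()}/attempt-${ctx.taskAttemptId()}"
```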
- abstract def taskMetrics(): TaskMetrics
  Annotations: @DeveloperApi()
Concrete Value Members
- final def !=(arg0: Any): Boolean
  Definition Classes: AnyRef → Any
- final def ##(): Int
  Definition Classes: AnyRef → Any
- final def ==(arg0: Any): Boolean
  Definition Classes: AnyRef → Any
- def addTaskCompletionListener[U](f: (TaskContext) ⇒ U): TaskContext
  Adds a listener in the form of a Scala closure to be executed on task completion. This will be called in all situations - success, failure, or cancellation. Adding a listener to an already completed task will result in that listener being called immediately.
  An example use is for HadoopRDD to register a callback to close the input stream, as in the sketch below.
  Exceptions thrown by the listener will result in failure of the task.
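The closure form is the idiomatic way to tie a resource's lifetime to the task (a sketch; `in` is a hypothetical stream opened by the task):

```scala
val in = new java.io.FileInputStream("/tmp/part-00000") // hypothetical input
org.apache.spark.TaskContext.get().addTaskCompletionListener[Unit] { _ =>
  in.close() // runs on success, failure, or cancellation
}
```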
- def addTaskFailureListener(f: (TaskContext, Throwable) ⇒ Unit): TaskContext
  Adds a listener to be executed on task failure (which includes completion listener failure, if the task body did not already fail). Adding a listener to an already failed task will result in that listener being called immediately.
  Note: Prior to Spark 3.4.0, failure listeners were only invoked if the main task body failed.
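The closure overload in the same style (a minimal sketch):

```scala
org.apache.spark.TaskContext.get().addTaskFailureListener { (ctx, error) =>
  // Runs only if the task fails; useful for logging or emitting diagnostics.
  System.err.println(s"Task ${ctx.taskAttemptId()} failed: ${error.getMessage}")
}
```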
- final def asInstanceOf[T0]: T0
  Definition Classes: Any
- def clone(): AnyRef
  Attributes: protected[lang]
  Definition Classes: AnyRef
  Annotations: @throws( ... ) @native()
- final def eq(arg0: AnyRef): Boolean
  Definition Classes: AnyRef
- def equals(arg0: Any): Boolean
  Definition Classes: AnyRef → Any
- def finalize(): Unit
  Attributes: protected[lang]
  Definition Classes: AnyRef
  Annotations: @throws( classOf[java.lang.Throwable] )
- final def getClass(): Class[_]
  Definition Classes: AnyRef → Any
  Annotations: @native()
- def hashCode(): Int
  Definition Classes: AnyRef → Any
  Annotations: @native()
- final def isInstanceOf[T0]: Boolean
  Definition Classes: Any
- final def ne(arg0: AnyRef): Boolean
  Definition Classes: AnyRef
- final def notify(): Unit
  Definition Classes: AnyRef
  Annotations: @native()
- final def notifyAll(): Unit
  Definition Classes: AnyRef
  Annotations: @native()
- final def synchronized[T0](arg0: ⇒ T0): T0
  Definition Classes: AnyRef
- def toString(): String
  Definition Classes: AnyRef → Any
- final def wait(): Unit
  Definition Classes: AnyRef
  Annotations: @throws( ... )
- final def wait(arg0: Long, arg1: Int): Unit
  Definition Classes: AnyRef
  Annotations: @throws( ... )
- final def wait(arg0: Long): Unit
  Definition Classes: AnyRef
  Annotations: @throws( ... ) @native()