Show the Stage ID and Task ID that correspond to the max metric. The DOT graph below is the SQL plan graph rendered by the Spark Web UI's SQL tab: each per-task metric is summarized as "total (min, med, max (stageId: taskId))", with the max value annotated by the stage attempt and task that produced it (for example, "stage 683.0: task 992" means stage 683, attempt 0, task 992).
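To make the format concrete, here is a minimal Scala sketch of how such a summary string can be assembled from per-task metric values. This is not Spark's actual SQLMetrics implementation; TaskValue and summarize are hypothetical names used only to illustrate how the max value is tagged with its stage attempt and task.

// Hypothetical per-task metric value; not Spark's internal representation.
case class TaskValue(stageId: Int, stageAttemptId: Int, taskId: Long, value: Long)

// Build a "total (min, med, max (stageId: taskId))" style summary string.
def summarize(values: Seq[TaskValue], fmt: Long => String): String = {
  require(values.nonEmpty, "need at least one task value")
  val sorted = values.sortBy(_.value)
  val total  = values.map(_.value).sum
  val min    = sorted.head
  val med    = sorted(sorted.size / 2)
  val max    = sorted.last
  // The max value carries the stage attempt and task that produced it,
  // e.g. "0 ms (0 ms, 0 ms, 0 ms (stage 683.0: task 992))"
  s"${fmt(total)} (${fmt(min.value)}, ${fmt(med.value)}, ${fmt(max.value)} " +
    s"(stage ${max.stageId}.${max.stageAttemptId}: task ${max.taskId}))"
}

// Example: summarize(Seq(TaskValue(683, 0, 992, 0)), v => s"$v ms")
// yields "0 ms (0 ms, 0 ms, 0 ms (stage 683.0: task 992))"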
digraph G {
0 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 0<br>local merged chunks fetched: 0<br>shuffle write time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 683.0: task 992))<br>remote merged bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 683.0: task 992))<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 683.0: task 992))<br>remote merged blocks fetched: 0<br>records read: 0<br>local bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 683.0: task 992))<br>fetch wait time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 683.0: task 992))<br>remote bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 683.0: task 992))<br>merged fetch fallback count: 0<br>local blocks read: 0<br>remote merged chunks fetched: 0<br>remote blocks read: 0<br>data size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 683.0: task 992))<br>local merged bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 683.0: task 992))<br>number of partitions: 16<br>remote reqs duration total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 683.0: task 992))<br>remote bytes read to disk total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 683.0: task 992))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 683.0: task 992))"];
1 [labelType="html" label="<b>HashAggregate</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>time in aggregation build total (min, med, max (stageId: taskId))<br>5.4 s (56 ms, 96 ms, 223 ms (stage 673.0: task 949))<br>peak memory total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>number of output rows: 0<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
2 [labelType="html" label="<b>StateStoreSave</b><br><br>number of shuffle partitions: 50<br>number of removed state rows: 0<br>data returned from Python workers total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>number of total state rows: 0<br>number of state store instances: 50<br>memory used by state total (min, med, max (stageId: taskId))<br>21.1 KiB (432.0 B, 432.0 B, 432.0 B (stage 673.0: task 942))<br>count of cache hit on states cache in provider: 300<br>number of output rows: 0<br>estimated size of state only on current version total (min, med, max (stageId: taskId))<br>5.1 KiB (104.0 B, 104.0 B, 104.0 B (stage 673.0: task 942))<br>number of rows which are dropped by watermark: 0<br>data sent to Python workers total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>count of cache miss on states cache in provider: 0<br>time to commit changes total (min, med, max (stageId: taskId))<br>5.4 s (56 ms, 96 ms, 223 ms (stage 673.0: task 949))<br>time to remove total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 673.0: task 942))<br>number of updated state rows: 0<br>time to update total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 673.0: task 942))<br>number of output rows: 0"];
3 [labelType="html" label="<b>HashAggregate</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>time in aggregation build total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 673.0: task 942))<br>peak memory total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>number of output rows: 0<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
4 [labelType="html" label="<b>StateStoreRestore</b><br><br>number of output rows: 0"];
5 [labelType="html" label="<b>HashAggregate</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>time in aggregation build total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 673.0: task 942))<br>peak memory total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>number of output rows: 0<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
6 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 0<br>local merged chunks fetched: 0<br>shuffle write time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 672.0: task 938))<br>remote merged bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 673.0: task 942))<br>remote merged blocks fetched: 0<br>records read: 0<br>local bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>fetch wait time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 673.0: task 942))<br>remote bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>merged fetch fallback count: 0<br>local blocks read: 0<br>remote merged chunks fetched: 0<br>remote blocks read: 0<br>data size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 672.0: task 938))<br>local merged bytes read total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>number of partitions: 50<br>remote reqs duration total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 673.0: task 942))<br>remote bytes read to disk total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 673.0: task 942))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 672.0: task 938))"];
7 [labelType="html" label="<b>HashAggregate</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 672.0: task 938))<br>time in aggregation build total (min, med, max (stageId: taskId))<br>531 ms (0 ms, 0 ms, 531 ms (stage 672.0: task 938))<br>peak memory total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 672.0: task 938))<br>number of output rows: 0<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
subgraph cluster8 {
isCluster="true";
label="WholeStageCodegen (2)\n \nduration: total (min, med, max (stageId: taskId))\n531 ms (0 ms, 0 ms, 531 ms (stage 672.0: task 938))";
9 [labelType="html" label="<br><b>Project</b><br><br>"];
}
10 [labelType="html" label="<br><b>EventTimeWatermark</b><br><br>"];
subgraph cluster11 {
isCluster="true";
label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n532 ms (0 ms, 0 ms, 532 ms (stage 672.0: task 938))";
12 [labelType="html" label="<br><b>Project</b><br><br>"];
}
13 [labelType="html" label="<br><b>Project</b><br><br>"];
14 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 0"];
15 [labelType="html" label="<b>MicroBatchScan</b><br><br>number of output rows: 1<br>estimated number of fetched offsets out of range: 0<br>number of data loss error: 0"];
1->0;
2->1;
3->2;
4->3;
5->4;
6->5;
7->6;
9->7;
10->9;
12->10;
13->12;
14->13;
15->14;
}
Project [data#23.uuid AS uuid#30, data#23.keyword AS keyword#31, data#23.inventory_code AS inventory_code#32, data#23.created_at AS created_at#33, data#23.date AS date#41]
Filter (((((isnotnull(value#8) AND NOT (RLIKE(from_json(StructField(agent,StringType,true), cast(value#8 as string), Some(Etc/UTC)).agent, Yeti|compatible|googlebot|google\.com\/bot\.html) <=> true)) AND (cast(from_json(StructField(date,StringType,true), cast(value#8 as string), Some(Etc/UTC)).date as date) = cast(from_utc_timestamp(2025-12-05 23:24:00.017, Asia/Seoul) as date))) AND (from_json(StructField(ad_type,StringType,true), cast(value#8 as string), Some(Etc/UTC)).ad_type = request)) AND isnotnull(from_json(StructField(keyword,StringType,true), cast(value#8 as string), Some(Etc/UTC)).keyword)) AND NOT (from_json(StructField(keyword,StringType,true), cast(value#8 as string), Some(Etc/UTC)).keyword = ))
MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan
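For context, a Structured Streaming query shaped roughly like the sketch below would produce a plan of this form: a Kafka MicroBatchScan feeding Filter and Project nodes, an EventTimeWatermark, and a stateful aggregation that yields the HashAggregate, Exchange, StateStoreRestore and StateStoreSave nodes. This is a minimal sketch under assumed names; the topic, bootstrap servers, schema fields, watermark column and interval are placeholders, not taken from the original job.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._

object WatermarkedKeywordAgg {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder.appName("watermarked-keyword-agg").getOrCreate()
    import spark.implicits._

    // Assumed message schema; the real job also parses agent, date, ad_type, etc.
    val schema = new StructType()
      .add("uuid", StringType)
      .add("keyword", StringType)
      .add("inventory_code", StringType)
      .add("created_at", TimestampType)
      .add("date", StringType)

    // Kafka micro-batch source -> MicroBatchScan node
    val source = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092") // placeholder
      .option("subscribe", "events")                        // placeholder topic
      .load()

    // JSON parsing, row filtering and column pruning -> Filter / Project nodes
    val parsed = source
      .select(from_json($"value".cast("string"), schema).as("data"))
      .filter($"data.keyword".isNotNull && $"data.keyword" =!= "")
      .select($"data.*")

    // Event-time watermark plus a grouped count -> EventTimeWatermark,
    // HashAggregate, Exchange, and StateStoreRestore/StateStoreSave nodes
    val counts = parsed
      .withWatermark("created_at", "10 minutes")
      .groupBy($"keyword")
      .count()

    counts.writeStream
      .format("console")
      .outputMode("update")
      .start()
      .awaitTermination()
  }
}

Running a query like this and opening its SQL execution in the Web UI shows each plan node annotated with the per-metric summaries in the format described above.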