@@ -19,7 +19,7 @@ package org.apache.spark.sql.execution.window
 
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, DenseRank, Expression, Rank, RowNumber, SortOrder, UnsafeProjection, UnsafeRow}
+import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, Expression, SortOrder, UnsafeProjection, UnsafeRow}
 import org.apache.spark.sql.catalyst.expressions.codegen.GenerateOrdering
 import org.apache.spark.sql.catalyst.plans.physical.{AllTuples, ClusteredDistribution, Distribution, Partitioning}
 import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
@@ -73,26 +73,23 @@ case class WindowGroupLimitExec(
 
   protected override def doExecute(): RDD[InternalRow] = {
     val numOutputRows = longMetric("numOutputRows")
-    rankLikeFunction match {
-      case _: RowNumber if partitionSpec.isEmpty =>
-        child.execute().mapPartitionsInternal(SimpleLimitIterator(_, limit, numOutputRows))
-      case _: RowNumber =>
-        child.execute().mapPartitionsInternal(new GroupedLimitIterator(_, output, partitionSpec,
-          (input: Iterator[InternalRow]) => SimpleLimitIterator(input, limit, numOutputRows)))
-      case _: Rank if partitionSpec.isEmpty =>
-        child.execute().mapPartitionsInternal(
-          RankLimitIterator(output, _, orderSpec, limit, numOutputRows))
-      case _: Rank =>
-        child.execute().mapPartitionsInternal(new GroupedLimitIterator(_, output, partitionSpec,
-          (input: Iterator[InternalRow]) =>
-            RankLimitIterator(output, input, orderSpec, limit, numOutputRows)))
-      case _: DenseRank if partitionSpec.isEmpty =>
-        child.execute().mapPartitionsInternal(
-          DenseRankLimitIterator(output, _, orderSpec, limit, numOutputRows))
-      case _: DenseRank =>
-        child.execute().mapPartitionsInternal(new GroupedLimitIterator(_, output, partitionSpec,
-          (input: Iterator[InternalRow]) =>
-            DenseRankLimitIterator(output, input, orderSpec, limit, numOutputRows)))
+
+    val evaluatorFactory =
+      new WindowGroupLimitEvaluatorFactory(
+        partitionSpec,
+        orderSpec,
+        rankLikeFunction,
+        limit,
+        child.output,
+        numOutputRows)
+
+    if (conf.usePartitionEvaluator) {
+      child.execute().mapPartitionsWithEvaluator(evaluatorFactory)
+    } else {
+      child.execute().mapPartitionsInternal { iter =>
+        val evaluator = evaluatorFactory.createEvaluator()
+        evaluator.eval(0, iter)
+      }
     }
   }
 
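For reviewers: below is a minimal sketch of how `WindowGroupLimitEvaluatorFactory` could satisfy Spark's `PartitionEvaluatorFactory`/`PartitionEvaluator` contract by re-hosting the rank-based dispatch removed from `doExecute` above. The inner class name and exact wiring are illustrative assumptions, not necessarily this PR's actual implementation; `SimpleLimitIterator`, `RankLimitIterator`, `DenseRankLimitIterator`, and `GroupedLimitIterator` are the helpers already defined in this file (same package).

```scala
// Sketch only: assumes the factory re-hosts the existing per-partition limit iterators.
import org.apache.spark.{PartitionEvaluator, PartitionEvaluatorFactory}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, DenseRank, Expression, Rank, RowNumber, SortOrder}
import org.apache.spark.sql.execution.metric.SQLMetric

class WindowGroupLimitEvaluatorFactory(
    partitionSpec: Seq[Expression],
    orderSpec: Seq[SortOrder],
    rankLikeFunction: Expression,
    limit: Int,
    childOutput: Seq[Attribute],
    numOutputRows: SQLMetric)
  extends PartitionEvaluatorFactory[InternalRow, InternalRow] {

  override def createEvaluator(): PartitionEvaluator[InternalRow, InternalRow] =
    new WindowGroupLimitPartitionEvaluator  // inner class name is hypothetical

  private class WindowGroupLimitPartitionEvaluator
    extends PartitionEvaluator[InternalRow, InternalRow] {

    // The same rank-based dispatch that doExecute used to perform, per partition.
    private def limitIterator(input: Iterator[InternalRow]): Iterator[InternalRow] =
      rankLikeFunction match {
        case _: RowNumber =>
          SimpleLimitIterator(input, limit, numOutputRows)
        case _: Rank =>
          RankLimitIterator(childOutput, input, orderSpec, limit, numOutputRows)
        case _: DenseRank =>
          DenseRankLimitIterator(childOutput, input, orderSpec, limit, numOutputRows)
      }

    override def eval(
        partitionIndex: Int,
        inputs: Iterator[InternalRow]*): Iterator[InternalRow] = {
      val input = inputs.head
      if (partitionSpec.isEmpty) {
        // No PARTITION BY: the whole RDD partition is a single window group.
        limitIterator(input)
      } else {
        // Group rows by the partition keys first, then limit each group.
        new GroupedLimitIterator(input, childOutput, partitionSpec, limitIterator)
      }
    }
  }
}
```

Keeping the dispatch inside the evaluator lets both code paths in the new `doExecute` (`mapPartitionsWithEvaluator` and the `mapPartitionsInternal` fallback) share the same per-partition logic.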