-
Notifications
You must be signed in to change notification settings - Fork 219
Expand file tree
/
Copy pathaggregate_executor.go
More file actions
133 lines (121 loc) · 3.33 KB
/
aggregate_executor.go
File metadata and controls
133 lines (121 loc) · 3.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/*
* Radon
*
* Copyright 2018 The Radon Authors.
* Code is licensed under the GPLv3.
*
*/
package executor
import (
"planner"
"xcontext"
"github.com/xelabs/go-mysqlstack/sqlparser/depends/hack"
"github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes"
"github.com/xelabs/go-mysqlstack/xlog"
)
var (
	// Compile-time assertion that *AggregateExecutor satisfies the
	// Executor interface; fails the build if the interface drifts.
	_ Executor = &AggregateExecutor{}
)
// AggregateExecutor represents aggregate executor.
// Including: COUNT/MAX/MIN/SUM/AVG/GROUPBY.
type AggregateExecutor struct {
	// log is the logger used by this executor.
	log *xlog.Log
	// plan holds the aggregation plan; aggregate() asserts it to
	// *planner.AggregatePlan at execution time.
	plan planner.Plan
}
// NewAggregateExecutor creates new AggregateExecutor.
func NewAggregateExecutor(log *xlog.Log, plan planner.Plan) *AggregateExecutor {
	executor := &AggregateExecutor{}
	executor.log = log
	executor.plan = plan
	return executor
}
// Execute used to execute the executor.
// It aggregates the rows already gathered in ctx.Results in place
// and always reports success.
func (executor *AggregateExecutor) Execute(ctx *xcontext.ResultContext) error {
	executor.aggregate(ctx.Results)
	return nil
}
// aggregate does rows-aggregation (COUNT/SUM/MIN/MAX/AVG) and groups rows
// into the group-by fields, rewriting result.Rows in place.
func (executor *AggregateExecutor) aggregate(result *sqltypes.Result) {
	plan := executor.plan.(*planner.AggregatePlan)
	if plan.Empty() {
		return
	}

	normalAggrs := plan.NormalAggregators()
	groupAggrs := plan.GroupAggregators()

	// Merge rows that share identical group-by column values. The group key
	// is the concatenation of the raw group-by values with 0x01/0x02 as
	// prefix/separator bytes to avoid accidental collisions.
	groups := make(map[string][]sqltypes.Value)
	for _, row := range result.Rows {
		keyBuf := []byte{0x01}
		for _, g := range groupAggrs {
			keyBuf = append(keyBuf, row[g.Index].Raw()...)
			keyBuf = append(keyBuf, 0x02)
		}
		groupKey := hack.String(keyBuf)
		existing, found := groups[groupKey]
		if !found {
			groups[groupKey] = row
			continue
		}
		if len(normalAggrs) > 0 {
			groups[groupKey] = operator(normalAggrs, row)(existing)
		}
	}

	// Finalize AVG (decomposed as sum/count in adjacent columns) and
	// rebuild the result rows from the merged groups.
	var dropIdxs []int
	rows := make([][]sqltypes.Value, 0, len(groups))
	for _, grouped := range groups {
		for _, aggr := range normalAggrs {
			if aggr.Type == planner.AggrTypeAvg {
				sum, cnt := grouped[aggr.Index], grouped[aggr.Index+1]
				grouped[aggr.Index] = sqltypes.Operator(sum, cnt, sqltypes.DivFn)
				dropIdxs = append(dropIdxs, aggr.Index+1)
			}
		}
		rows = append(rows, grouped)
	}
	result.Rows = rows

	// Remove the count columns that AVG was decomposed into.
	result.RemoveColumns(dropIdxs...)
}
// operator returns a merge function that combines row y into a copy of
// row x at every aggregator column. Supported types: SUM/COUNT/MIN/MAX/AVG
// (AVG itself is a no-op here — it is merged via its decomposed SUM/COUNT
// columns and finalized by the caller).
//
// NULL is treated as the identity for every combiner: if either side is
// NULL, the other side is taken unchanged.
func operator(aggrs []planner.Aggregator, x []sqltypes.Value) func([]sqltypes.Value) []sqltypes.Value {
	return func(y []sqltypes.Value) []sqltypes.Value {
		ret := sqltypes.Row(x).Copy()
		for _, aggr := range aggrs {
			// Pick the pairwise combiner once per aggregator; the
			// NULL-as-identity merge below is shared by all of them.
			fn := sqltypes.SumFn
			switch aggr.Type {
			case planner.AggrTypeSum, planner.AggrTypeCount:
				// fn is already SumFn.
			case planner.AggrTypeMin:
				fn = sqltypes.MinFn
			case planner.AggrTypeMax:
				fn = sqltypes.MaxFn
			default:
				// AggrTypeAvg (and any unknown type): nothing to merge here.
				continue
			}

			v1, v2 := x[aggr.Index], y[aggr.Index]
			switch {
			case v1.Type() == sqltypes.Null:
				ret[aggr.Index] = v2
			case v2.Type() == sqltypes.Null:
				ret[aggr.Index] = v1
			default:
				ret[aggr.Index] = sqltypes.Operator(v1, v2, fn)
			}
		}
		return ret
	}
}