broke scrutiny_repository.go into multiple files for easier exploration & maintenance.

pull/228/head
Jason Kulatunga 3 years ago
parent 7a7771981a
commit f60636a6aa

@@ -13,7 +13,6 @@ import (
	"github.com/sirupsen/logrus"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"strings"
	"time"
)
@@ -452,70 +451,3 @@ func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []str
	}
	return []string{DURATION_KEY_WEEK}
}
func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
	/*
		import "influxdata/influxdb/schema"
		weekData = from(bucket: "metrics")
		|> range(start: -1w, stop: now())
		|> filter(fn: (r) => r["_measurement"] == "temp" )
		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
		|> group(columns: ["device_wwn"])
		|> toInt()
		monthData = from(bucket: "metrics_weekly")
		|> range(start: -1mo, stop: now())
		|> filter(fn: (r) => r["_measurement"] == "temp" )
		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
		|> group(columns: ["device_wwn"])
		|> toInt()
		union(tables: [weekData, monthData])
		|> group(columns: ["device_wwn"])
		|> sort(columns: ["_time"], desc: false)
		|> schema.fieldsAsCols()
	*/
	partialQueryStr := []string{
		`import "influxdata/influxdb/schema"`,
	}
	nestedDurationKeys := sr.lookupNestedDurationKeys(durationKey)
	subQueryNames := []string{}
	for _, nestedDurationKey := range nestedDurationKeys {
		bucketName := sr.lookupBucketName(nestedDurationKey)
		durationRange := sr.lookupDuration(nestedDurationKey)
		subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
		partialQueryStr = append(partialQueryStr, []string{
			fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
			fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
			`|> filter(fn: (r) => r["_measurement"] == "temp" )`,
			`|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)`,
			`|> group(columns: ["device_wwn"])`,
			`|> toInt()`,
			"",
		}...)
	}
	if len(subQueryNames) == 1 {
		//there's only one bucket being queried, no need to union, just aggregate the dataset and return
		partialQueryStr = append(partialQueryStr, []string{
			subQueryNames[0],
			"|> schema.fieldsAsCols()",
			"|> yield()",
		}...)
	} else {
		partialQueryStr = append(partialQueryStr, []string{
			fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
			`|> group(columns: ["device_wwn"])`,
			`|> sort(columns: ["_time"], desc: false)`,
			"|> schema.fieldsAsCols()",
		}...)
	}
	return strings.Join(partialQueryStr, "\n")
}
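
Note (not part of this diff): the lookup helpers the relocated function calls, lookupBucketName and lookupDuration, are defined elsewhere in the repository and are not shown here. Below is a minimal, self-contained sketch of the shape they are assumed to have, using only the bucket names ("metrics", "metrics_weekly") and range bounds (-1w, -1mo, now()) that appear in the commented Flux example above; the duration-key values and the actual mappings in the repository may differ.

	package main

	import "fmt"

	// Duration keys as referenced in the diff above (the string values here are assumptions).
	const (
		DURATION_KEY_WEEK  = "week"
		DURATION_KEY_MONTH = "month"
	)

	// Assumed sketch of lookupBucketName: map a duration key to the InfluxDB bucket holding that resolution.
	func lookupBucketName(durationKey string) string {
		if durationKey == DURATION_KEY_MONTH {
			return "metrics_weekly" // bucket name taken from the commented Flux example
		}
		return "metrics" // bucket name taken from the commented Flux example
	}

	// Assumed sketch of lookupDuration: map a duration key to the [start, stop] bounds passed to Flux range().
	func lookupDuration(durationKey string) []string {
		if durationKey == DURATION_KEY_MONTH {
			return []string{"-1mo", "now()"}
		}
		return []string{"-1w", "now()"}
	}

	func main() {
		fmt.Println(lookupBucketName(DURATION_KEY_WEEK), lookupDuration(DURATION_KEY_WEEK))
		fmt.Println(lookupBucketName(DURATION_KEY_MONTH), lookupDuration(DURATION_KEY_MONTH))
	}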

@@ -6,6 +6,7 @@ import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	"strings"
	"time"
)
@@ -93,3 +94,74 @@ func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, du
	return deviceTempHistory, nil
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Helper Methods
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
	/*
		import "influxdata/influxdb/schema"
		weekData = from(bucket: "metrics")
		|> range(start: -1w, stop: now())
		|> filter(fn: (r) => r["_measurement"] == "temp" )
		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
		|> group(columns: ["device_wwn"])
		|> toInt()
		monthData = from(bucket: "metrics_weekly")
		|> range(start: -1mo, stop: now())
		|> filter(fn: (r) => r["_measurement"] == "temp" )
		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
		|> group(columns: ["device_wwn"])
		|> toInt()
		union(tables: [weekData, monthData])
		|> group(columns: ["device_wwn"])
		|> sort(columns: ["_time"], desc: false)
		|> schema.fieldsAsCols()
	*/
	partialQueryStr := []string{
		`import "influxdata/influxdb/schema"`,
	}
	nestedDurationKeys := sr.lookupNestedDurationKeys(durationKey)
	subQueryNames := []string{}
	for _, nestedDurationKey := range nestedDurationKeys {
		bucketName := sr.lookupBucketName(nestedDurationKey)
		durationRange := sr.lookupDuration(nestedDurationKey)
		subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
		partialQueryStr = append(partialQueryStr, []string{
			fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
			fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
			`|> filter(fn: (r) => r["_measurement"] == "temp" )`,
			`|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)`,
			`|> group(columns: ["device_wwn"])`,
			`|> toInt()`,
			"",
		}...)
	}
	if len(subQueryNames) == 1 {
		//there's only one bucket being queried, no need to union, just aggregate the dataset and return
		partialQueryStr = append(partialQueryStr, []string{
			subQueryNames[0],
			"|> schema.fieldsAsCols()",
			"|> yield()",
		}...)
	} else {
		partialQueryStr = append(partialQueryStr, []string{
			fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
			`|> group(columns: ["device_wwn"])`,
			`|> sort(columns: ["_time"], desc: false)`,
			"|> schema.fieldsAsCols()",
		}...)
	}
	return strings.Join(partialQueryStr, "\n")
}
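
For context (not part of this diff): the influxdb2 import added above is the client that ultimately runs the Flux string built by aggregateTempQuery. The following is a minimal, hedged sketch of how such a generated query could be executed with the influxdb-client-go/v2 QueryAPI; the URL, token, org, and inlined query are placeholders, and the real wiring inside GetSmartTemperatureHistory is not shown in this hunk.

	package main

	import (
		"context"
		"fmt"

		influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	)

	func main() {
		// Placeholder connection details -- not taken from the Scrutiny configuration.
		client := influxdb2.NewClient("http://localhost:8086", "my-token")
		defer client.Close()
		queryAPI := client.QueryAPI("my-org")

		// fluxQuery stands in for the string returned by aggregateTempQuery(durationKey).
		fluxQuery := `from(bucket: "metrics")
	|> range(start: -1w, stop: now())
	|> filter(fn: (r) => r["_measurement"] == "temp" )`

		result, err := queryAPI.Query(context.Background(), fluxQuery)
		if err != nil {
			panic(err)
		}
		for result.Next() {
			record := result.Record()
			// "device_wwn" is the grouping column used by the generated query.
			fmt.Println(record.Time(), record.ValueByKey("device_wwn"), record.Value())
		}
		if result.Err() != nil {
			panic(result.Err())
		}
	}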
