From f60636a6aa12ca446ca473f93ca67362c8cc2aa5 Mon Sep 17 00:00:00 2001
From: Jason Kulatunga
Date: Thu, 28 Apr 2022 22:33:09 -0700
Subject: [PATCH] broke scrutiny_repository.go into multiple files for easier
 exploration & maintenance.

---
 .../pkg/database/scrutiny_repository.go            | 68 ------------------
 .../scrutiny_repository_temperature.go             | 72 +++++++++++++++++++
 2 files changed, 72 insertions(+), 68 deletions(-)

diff --git a/webapp/backend/pkg/database/scrutiny_repository.go b/webapp/backend/pkg/database/scrutiny_repository.go
index 2ac946d..5d3ca51 100644
--- a/webapp/backend/pkg/database/scrutiny_repository.go
+++ b/webapp/backend/pkg/database/scrutiny_repository.go
@@ -13,7 +13,6 @@ import (
 	"github.com/sirupsen/logrus"
 	"gorm.io/driver/sqlite"
 	"gorm.io/gorm"
-	"strings"
 	"time"
 )
 
@@ -452,70 +451,3 @@ func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []str
 	}
 	return []string{DURATION_KEY_WEEK}
 }
-
-func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
-
-	/*
-		import "influxdata/influxdb/schema"
-		weekData = from(bucket: "metrics")
-		|> range(start: -1w, stop: now())
-		|> filter(fn: (r) => r["_measurement"] == "temp" )
-		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
-		|> group(columns: ["device_wwn"])
-		|> toInt()
-
-		monthData = from(bucket: "metrics_weekly")
-		|> range(start: -1mo, stop: now())
-		|> filter(fn: (r) => r["_measurement"] == "temp" )
-		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
-		|> group(columns: ["device_wwn"])
-		|> toInt()
-
-		union(tables: [weekData, monthData])
-		|> group(columns: ["device_wwn"])
-		|> sort(columns: ["_time"], desc: false)
-		|> schema.fieldsAsCols()
-
-	*/
-
-	partialQueryStr := []string{
-		`import "influxdata/influxdb/schema"`,
-	}
-
-	nestedDurationKeys := sr.lookupNestedDurationKeys(durationKey)
-
-	subQueryNames := []string{}
-	for _, nestedDurationKey := range nestedDurationKeys {
-		bucketName := sr.lookupBucketName(nestedDurationKey)
-		durationRange := sr.lookupDuration(nestedDurationKey)
-
-		subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
-		partialQueryStr = append(partialQueryStr, []string{
-			fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
-			fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
-			`|> filter(fn: (r) => r["_measurement"] == "temp" )`,
-			`|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)`,
-			`|> group(columns: ["device_wwn"])`,
-			`|> toInt()`,
-			"",
-		}...)
-	}
-
-	if len(subQueryNames) == 1 {
-		//there's only one bucket being queried, no need to union, just aggregate the dataset and return
-		partialQueryStr = append(partialQueryStr, []string{
-			subQueryNames[0],
-			"|> schema.fieldsAsCols()",
-			"|> yield()",
-		}...)
-	} else {
-		partialQueryStr = append(partialQueryStr, []string{
-			fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
-			`|> group(columns: ["device_wwn"])`,
-			`|> sort(columns: ["_time"], desc: false)`,
-			"|> schema.fieldsAsCols()",
-		}...)
-	}
-
-	return strings.Join(partialQueryStr, "\n")
-}
diff --git a/webapp/backend/pkg/database/scrutiny_repository_temperature.go b/webapp/backend/pkg/database/scrutiny_repository_temperature.go
index 574ca2c..fb3be6a 100644
--- a/webapp/backend/pkg/database/scrutiny_repository_temperature.go
+++ b/webapp/backend/pkg/database/scrutiny_repository_temperature.go
@@ -6,6 +6,7 @@ import (
 	"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
 	"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
 	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
+	"strings"
 	"time"
 )
 
@@ -93,3 +94,74 @@ func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, du
 
 	return deviceTempHistory, nil
 }
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Helper Methods
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
+
+	/*
+		import "influxdata/influxdb/schema"
+		weekData = from(bucket: "metrics")
+		|> range(start: -1w, stop: now())
+		|> filter(fn: (r) => r["_measurement"] == "temp" )
+		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
+		|> group(columns: ["device_wwn"])
+		|> toInt()
+
+		monthData = from(bucket: "metrics_weekly")
+		|> range(start: -1mo, stop: now())
+		|> filter(fn: (r) => r["_measurement"] == "temp" )
+		|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
+		|> group(columns: ["device_wwn"])
+		|> toInt()
+
+		union(tables: [weekData, monthData])
+		|> group(columns: ["device_wwn"])
+		|> sort(columns: ["_time"], desc: false)
+		|> schema.fieldsAsCols()
+
+	*/
+
+	partialQueryStr := []string{
+		`import "influxdata/influxdb/schema"`,
+	}
+
+	nestedDurationKeys := sr.lookupNestedDurationKeys(durationKey)
+
+	subQueryNames := []string{}
+	for _, nestedDurationKey := range nestedDurationKeys {
+		bucketName := sr.lookupBucketName(nestedDurationKey)
+		durationRange := sr.lookupDuration(nestedDurationKey)
+
+		subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
+		partialQueryStr = append(partialQueryStr, []string{
+			fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
+			fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
+			`|> filter(fn: (r) => r["_measurement"] == "temp" )`,
+			`|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)`,
+			`|> group(columns: ["device_wwn"])`,
+			`|> toInt()`,
+			"",
+		}...)
+	}
+
+	if len(subQueryNames) == 1 {
+		//there's only one bucket being queried, no need to union, just aggregate the dataset and return
+		partialQueryStr = append(partialQueryStr, []string{
+			subQueryNames[0],
+			"|> schema.fieldsAsCols()",
+			"|> yield()",
+		}...)
+	} else {
+		partialQueryStr = append(partialQueryStr, []string{
+			fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
+			`|> group(columns: ["device_wwn"])`,
+			`|> sort(columns: ["_time"], desc: false)`,
+			"|> schema.fieldsAsCols()",
+		}...)
+	}
+
+	return strings.Join(partialQueryStr, "\n")
+}
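Note (not part of the patch): the relocated aggregateTempQuery helper only assembles a Flux query string from the lookup* helpers; executing it is left to the caller. Below is a minimal, hypothetical sketch of how a query of that shape could be run with the influxdb-client-go/v2 API that scrutiny_repository_temperature.go already imports. The server URL, token, org, and the hard-coded fluxQuery literal are placeholder assumptions for illustration, not values taken from this patch; the query text mirrors the single-bucket branch of the function (one sub-query, schema.fieldsAsCols(), yield()).

package main

import (
	"context"
	"fmt"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
)

func main() {
	// Placeholder connection details; Scrutiny resolves its real values from its own config.
	client := influxdb2.NewClient("http://localhost:8086", "my-token")
	defer client.Close()
	queryAPI := client.QueryAPI("my-org")

	// A query of the single-bucket shape aggregateTempQuery produces (no union needed),
	// following the comment block in the patched function; the bucket name is illustrative.
	fluxQuery := `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

weekData
|> schema.fieldsAsCols()
|> yield()`

	result, err := queryAPI.Query(context.Background(), fluxQuery)
	if err != nil {
		panic(err)
	}
	for result.Next() {
		rec := result.Record()
		// Records are grouped by device_wwn; schema.fieldsAsCols() pivots fields such as temp into columns.
		fmt.Println(rec.Time(), rec.ValueByKey("device_wwn"), rec.ValueByKey("temp"))
	}
	if result.Err() != nil {
		panic(result.Err())
	}
}

Because the helper is a pure string builder (duration key in, Flux text out) with no InfluxDB client state, moving it between files is behavior-neutral, which is what lets this refactor be a straight cut-and-paste plus the "strings" import swap.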