chore: gofumpt

Format all Go files with gofumpt.

https://github.com/mvdan/gofumpt

	gofumpt -extra -w .
pull/586/head
Thomas Way 3 months ago
parent 5e33c33e75
commit 9d124df86b
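The hunks below were produced mechanically by `gofumpt -extra -w .`. As an illustrative sketch (not taken from this commit), the rewrites that account for most of the diff are: grouping adjacent top-level `var`/`const` declarations, adding a space after `//` in comments, switching octal literals to the `0o` prefix, dropping the unused blank index in `for ndx, _ := range`, and merging consecutive parameters of the same type.

```go
package main

import (
	"fmt"
	"os"
)

// Adjacent top-level declarations are grouped into a single block
// (var goos string / var goarch string -> var (...)).
var (
	goos   string
	goarch string
)

// Consecutive parameters of the same type are merged:
// `body interface{}, target interface{}` becomes `body, target interface{}`.
func describe(body, target interface{}) string {
	return fmt.Sprintf("%T -> %T", body, target)
}

func main() {
	// Comments gain a space after the slashes (//comment -> // comment),
	// and octal literals use the explicit 0o prefix (0644 -> 0o644).
	f, err := os.OpenFile("example.log", os.O_CREATE|os.O_WRONLY, 0o644)
	if err == nil {
		defer f.Close()
	}

	// The unused blank index is dropped: `for ndx, _ := range x` -> `for ndx := range x`.
	devices := []string{"sda", "sdb"}
	for ndx := range devices {
		fmt.Println(ndx, describe(goos, goarch))
	}
}
```

Since the formatter is idempotent, re-running the same command on the formatted tree produces no further changes, which keeps commits like this one easy to review.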

@ -3,27 +3,29 @@ package main
import (
"encoding/json"
"fmt"
"github.com/analogj/scrutiny/collector/pkg/collector"
"github.com/analogj/scrutiny/collector/pkg/config"
"github.com/analogj/scrutiny/collector/pkg/errors"
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
"github.com/sirupsen/logrus"
"io"
"log"
"os"
"strings"
"time"
"github.com/analogj/scrutiny/collector/pkg/collector"
"github.com/analogj/scrutiny/collector/pkg/config"
"github.com/analogj/scrutiny/collector/pkg/errors"
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
"github.com/sirupsen/logrus"
utils "github.com/analogj/go-util/utils"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
)
var goos string
var goarch string
var (
goos string
goarch string
)
func main() {
config, err := config.Create()
if err != nil {
fmt.Printf("FATAL: %+v\n", err)
@ -36,10 +38,10 @@ func main() {
configFilePath = configFilePathAlternative
}
//we're going to load the config file manually, since we need to validate it.
err = config.ReadConfig(configFilePath) // Find and read the config file
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
//ignore "could not find config file"
// we're going to load the config file manually, since we need to validate it.
err = config.ReadConfig(configFilePath) // Find and read the config file
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
// ignore "could not find config file"
} else if err != nil {
os.Exit(1)
}
@ -69,7 +71,6 @@ OPTIONS:
},
},
Before: func(c *cli.Context) error {
collectorMetrics := "AnalogJ/scrutiny/metrics"
var versionInfo string
@ -102,12 +103,12 @@ OPTIONS:
if c.IsSet("config") {
err = config.ReadConfig(c.String("config")) // Find and read the config file
if err != nil { // Handle errors reading the config file
//ignore "could not find config file"
// ignore "could not find config file"
fmt.Printf("Could not find config file at specified path: %s", c.String("config"))
return err
}
}
//override config with flags if set
// override config with flags if set
if c.IsSet("host-id") {
config.Set("host.id", c.String("host-id")) // set/override the host-id using CLI.
}
@ -121,8 +122,8 @@ OPTIONS:
}
if c.IsSet("api-endpoint") {
//if the user is providing an api-endpoint with a basepath (eg. http://localhost:8080/scrutiny),
//we need to ensure the basepath has a trailing slash, otherwise the url.Parse() path concatenation doesnt work.
// if the user is providing an api-endpoint with a basepath (eg. http://localhost:8080/scrutiny),
// we need to ensure the basepath has a trailing slash, otherwise the url.Parse() path concatenation doesnt work.
apiEndpoint := strings.TrimSuffix(c.String("api-endpoint"), "/") + "/"
config.Set("api.endpoint", apiEndpoint)
}
@ -142,7 +143,6 @@ OPTIONS:
collectorLogger,
config.GetString("api.endpoint"),
)
if err != nil {
return err
}
@ -159,7 +159,7 @@ OPTIONS:
Name: "api-endpoint",
Usage: "The api server endpoint",
EnvVars: []string{"COLLECTOR_API_ENDPOINT", "SCRUTINY_API_ENDPOINT"},
//SCRUTINY_API_ENDPOINT is deprecated, but kept for backwards compatibility
// SCRUTINY_API_ENDPOINT is deprecated, but kept for backwards compatibility
},
&cli.StringFlag{
@ -205,7 +205,7 @@ func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
var logFile *os.File
var err error
if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
return nil, logFile, err

@ -2,24 +2,26 @@ package main
import (
"fmt"
"github.com/analogj/scrutiny/collector/pkg/collector"
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
"github.com/sirupsen/logrus"
"io"
"log"
"os"
"time"
"github.com/analogj/scrutiny/collector/pkg/collector"
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
"github.com/sirupsen/logrus"
utils "github.com/analogj/go-util/utils"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
)
var goos string
var goarch string
var (
goos string
goarch string
)
func main() {
cli.CommandHelpTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
@ -45,7 +47,6 @@ OPTIONS:
},
},
Before: func(c *cli.Context) error {
collectorSelfTest := "AnalogJ/scrutiny/selftest"
var versionInfo string
@ -75,7 +76,6 @@ OPTIONS:
Name: "run",
Usage: "Run the scrutiny self-test data collector",
Action: func(c *cli.Context) error {
collectorLogger := logrus.WithFields(logrus.Fields{
"type": "selftest",
})
@ -87,7 +87,7 @@ OPTIONS:
}
if c.IsSet("log-file") {
logFile, err := os.OpenFile(c.String("log-file"), os.O_CREATE|os.O_WRONLY, 0644)
logFile, err := os.OpenFile(c.String("log-file"), os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
logrus.Errorf("Failed to open log file %s for output: %s", c.String("log-file"), err)
return err
@ -96,12 +96,11 @@ OPTIONS:
logrus.SetOutput(io.MultiWriter(os.Stderr, logFile))
}
//TODO: pass in the collector, use configuration from collector-metrics
// TODO: pass in the collector, use configuration from collector-metrics
stCollector, err := collector.CreateSelfTestCollector(
collectorLogger,
c.String("api-endpoint"),
)
if err != nil {
return err
}
@ -138,5 +137,4 @@ OPTIONS:
if err != nil {
log.Fatal(color.HiRedString("ERROR: %v", err))
}
}

@ -3,9 +3,10 @@ package collector
import (
"bytes"
"encoding/json"
"github.com/sirupsen/logrus"
"net/http"
"time"
"github.com/sirupsen/logrus"
)
var httpClient = &http.Client{Timeout: 60 * time.Second}
@ -15,7 +16,6 @@ type BaseCollector struct {
}
func (c *BaseCollector) getJson(url string, target interface{}) error {
r, err := httpClient.Get(url)
if err != nil {
return err
@ -25,7 +25,7 @@ func (c *BaseCollector) getJson(url string, target interface{}) error {
return json.NewDecoder(r.Body).Decode(target)
}
func (c *BaseCollector) postJson(url string, body interface{}, target interface{}) error {
func (c *BaseCollector) postJson(url string, body, target interface{}) error {
requestBody, err := json.Marshal(body)
if err != nil {
return err

@ -4,6 +4,11 @@ import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"os"
"os/exec"
"strings"
"github.com/analogj/scrutiny/collector/pkg/common/shell"
"github.com/analogj/scrutiny/collector/pkg/config"
"github.com/analogj/scrutiny/collector/pkg/detect"
@ -11,10 +16,6 @@ import (
"github.com/analogj/scrutiny/collector/pkg/models"
"github.com/samber/lo"
"github.com/sirupsen/logrus"
"net/url"
"os"
"os/exec"
"strings"
)
type MetricsCollector struct {
@ -49,7 +50,7 @@ func (mc *MetricsCollector) Run() error {
}
apiEndpoint, _ := url.Parse(mc.apiEndpoint.String())
apiEndpoint, _ = apiEndpoint.Parse("api/devices/register") //this acts like filepath.Join()
apiEndpoint, _ = apiEndpoint.Parse("api/devices/register") // this acts like filepath.Join()
deviceRespWrapper := new(models.DeviceWrapper)
@ -62,7 +63,7 @@ func (mc *MetricsCollector) Run() error {
return err
}
//filter any device with empty wwn (they are invalid)
// filter any device with empty wwn (they are invalid)
detectedStorageDevices := lo.Filter[models.Device](rawDetectedStorageDevices, func(dev models.Device, _ int) bool {
return len(dev.WWN) > 0
})
@ -83,19 +84,19 @@ func (mc *MetricsCollector) Run() error {
return errors.ApiServerCommunicationError("An error occurred while retrieving filtered devices")
} else {
mc.logger.Debugln(deviceRespWrapper)
//var wg sync.WaitGroup
// var wg sync.WaitGroup
for _, device := range deviceRespWrapper.Data {
// execute collection in parallel go-routines
//wg.Add(1)
//go mc.Collect(&wg, device.WWN, device.DeviceName, device.DeviceType)
// wg.Add(1)
// go mc.Collect(&wg, device.WWN, device.DeviceName, device.DeviceType)
mc.Collect(device.WWN, device.DeviceName, device.DeviceType)
// TODO: we may need to sleep for between each call to smartctl -a
//time.Sleep(30 * time.Millisecond)
// time.Sleep(30 * time.Millisecond)
}
//mc.logger.Infoln("Main: Waiting for workers to finish")
//wg.Wait()
// mc.logger.Infoln("Main: Waiting for workers to finish")
// wg.Wait()
mc.logger.Infoln("Main: Completed")
}
@ -113,9 +114,9 @@ func (mc *MetricsCollector) Validate() error {
return nil
}
//func (mc *MetricsCollector) Collect(wg *sync.WaitGroup, deviceWWN string, deviceName string, deviceType string) {
func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceType string) {
//defer wg.Done()
// func (mc *MetricsCollector) Collect(wg *sync.WaitGroup, deviceWWN string, deviceName string, deviceType string) {
func (mc *MetricsCollector) Collect(deviceWWN, deviceName, deviceType string) {
// defer wg.Done()
if len(deviceWWN) == 0 {
mc.logger.Errorf("no device WWN detected for %s. Skipping collection for this device (no data association possible).\n", deviceName)
return
@ -124,7 +125,7 @@ func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceT
fullDeviceName := fmt.Sprintf("%s%s", detect.DevicePrefix(), deviceName)
args := strings.Split(mc.config.GetCommandMetricsSmartArgs(fullDeviceName), " ")
//only include the device type if its a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
// only include the device type if its a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
if len(deviceType) > 0 && deviceType != "scsi" && deviceType != "ata" {
args = append(args, "--device", deviceType)
}
@ -145,7 +146,7 @@ func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceT
}
return
} else {
//successful run, pass the results directly to webapp backend for parsing and processing.
// successful run, pass the results directly to webapp backend for parsing and processing.
mc.Publish(deviceWWN, resultBytes)
}
}

@ -1,9 +1,10 @@
package collector
import (
"github.com/stretchr/testify/require"
"net/url"
"testing"
"github.com/stretchr/testify/require"
)
func TestApiEndpointParse(t *testing.T) {
@ -19,7 +20,7 @@ func TestApiEndpointParse(t *testing.T) {
func TestApiEndpointParse_WithBasepathWithoutTrailingSlash(t *testing.T) {
baseURL, _ := url.Parse("http://localhost:8080/scrutiny")
//This testcase is unexpected and can cause issues. We need to ensure the apiEndpoint always has a trailing slash.
// This testcase is unexpected and can cause issues. We need to ensure the apiEndpoint always has a trailing slash.
url1, _ := baseURL.Parse("d/e")
require.Equal(t, "http://localhost:8080/d/e", url1.String())
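The test above pins down the surprising part of `url.Parse` relative resolution that the collector works around by forcing a trailing slash on `api-endpoint`. A small standalone sketch of that behavior (the endpoint values are illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Without a trailing slash, relative resolution replaces the last path
	// segment, so the "scrutiny" basepath is lost (the case the test covers).
	base, _ := url.Parse("http://localhost:8080/scrutiny")
	u1, _ := base.Parse("api/devices/register")
	fmt.Println(u1) // http://localhost:8080/api/devices/register

	// With a trailing slash the basepath is kept, which is why the collector
	// does strings.TrimSuffix(endpoint, "/") + "/" before storing it.
	baseSlash, _ := url.Parse("http://localhost:8080/scrutiny/")
	u2, _ := baseSlash.Parse("api/devices/register")
	fmt.Println(u2) // http://localhost:8080/scrutiny/api/devices/register
}
```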

@ -1,8 +1,9 @@
package collector
import (
"github.com/sirupsen/logrus"
"net/url"
"github.com/sirupsen/logrus"
)
type SelfTestCollector struct {

@ -3,11 +3,12 @@ package shell
import (
"bytes"
"errors"
"github.com/sirupsen/logrus"
"io"
"os/exec"
"path"
"strings"
"github.com/sirupsen/logrus"
)
type localShell struct{}
@ -41,5 +42,4 @@ func (s *localShell) Command(logger *logrus.Entry, cmdName string, cmdArgs []str
err := cmd.Run()
return stdBuffer.String(), err
}

@ -1,21 +1,22 @@
package shell
import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"os/exec"
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
)
func TestLocalShellCommand(t *testing.T) {
t.Parallel()
//setup
// setup
testShell := localShell{}
//test
// test
result, err := testShell.Command(logrus.WithField("exec", "test"), "echo", []string{"hello world"}, "", nil)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "hello world\n", result)
}
@ -23,13 +24,13 @@ func TestLocalShellCommand(t *testing.T) {
func TestLocalShellCommand_Date(t *testing.T) {
t.Parallel()
//setup
// setup
testShell := localShell{}
//test
// test
_, err := testShell.Command(logrus.WithField("exec", "test"), "date", []string{}, "", nil)
//assert
// assert
require.NoError(t, err)
}
@ -54,13 +55,13 @@ func TestLocalShellCommand_Date(t *testing.T) {
func TestLocalShellCommand_InvalidCommand(t *testing.T) {
t.Parallel()
//setup
// setup
testShell := localShell{}
//test
// test
_, err := testShell.Command(logrus.WithField("exec", "test"), "invalid_binary", []string{}, "", nil)
//assert
// assert
_, castOk := err.(*exec.ExitError)
require.False(t, castOk)
}

@ -2,15 +2,16 @@ package config
import (
"fmt"
"log"
"os"
"sort"
"strings"
"github.com/analogj/go-util/utils"
"github.com/analogj/scrutiny/collector/pkg/errors"
"github.com/analogj/scrutiny/collector/pkg/models"
"github.com/mitchellh/mapstructure"
"github.com/spf13/viper"
"log"
"os"
"sort"
"strings"
)
// When initializing this class the following methods must be called:
@ -23,7 +24,7 @@ type configuration struct {
deviceOverrides []models.ScanOverride
}
//Viper uses the following precedence order. Each item takes precedence over the item below it:
// Viper uses the following precedence order. Each item takes precedence over the item below it:
// explicit call to Set
// flag
// env
@ -33,7 +34,7 @@ type configuration struct {
func (c *configuration) Init() error {
c.Viper = viper.New()
//set defaults
// set defaults
c.SetDefault("host.id", "")
c.SetDefault("devices", []string{})
@ -48,14 +49,14 @@ func (c *configuration) Init() error {
c.SetDefault("commands.metrics_info_args", "--info --json")
c.SetDefault("commands.metrics_smart_args", "--xall --json")
//c.SetDefault("collect.short.command", "-a -o on -S on")
// c.SetDefault("collect.short.command", "-a -o on -S on")
//if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
// if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
c.SetConfigType("yaml")
//c.SetConfigName("drawbridge")
//c.AddConfigPath("$HOME/")
// c.SetConfigName("drawbridge")
// c.AddConfigPath("$HOME/")
//CLI options will be added via the `Set()` function
// CLI options will be added via the `Set()` function
return nil
}
@ -95,8 +96,7 @@ func (c *configuration) ReadConfig(configFilePath string) error {
// This function ensures that the merged config works correctly.
func (c *configuration) ValidateConfig() error {
//TODO:
// TODO:
// check that device prefix matches OS
// check that schema of config file is valid
@ -110,7 +110,7 @@ func (c *configuration) ValidateConfig() error {
errorStrings := []string{}
for configKey, commandArgString := range commandArgStrings {
args := strings.Split(commandArgString, " ")
//ensure that the args string contains `--json` or `-j` flag
// ensure that the args string contains `--json` or `-j` flag
containsJsonFlag := false
containsDeviceFlag := false
for _, flag := range args {
@ -130,7 +130,7 @@ func (c *configuration) ValidateConfig() error {
errorStrings = append(errorStrings, fmt.Sprintf("configuration key '%s' must not contain '--device' or '-d' flag", configKey))
}
}
//sort(errorStrings)
// sort(errorStrings)
sort.Strings(errorStrings)
if len(errorStrings) == 0 {
@ -160,7 +160,7 @@ func (c *configuration) GetCommandMetricsInfoArgs(deviceName string) string {
for _, deviceOverrides := range overrides {
if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
//found matching device
// found matching device
if len(deviceOverrides.Commands.MetricsInfoArgs) > 0 {
return deviceOverrides.Commands.MetricsInfoArgs
} else {
@ -176,7 +176,7 @@ func (c *configuration) GetCommandMetricsSmartArgs(deviceName string) string {
for _, deviceOverrides := range overrides {
if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
//found matching device
// found matching device
if len(deviceOverrides.Commands.MetricsSmartArgs) > 0 {
return deviceOverrides.Commands.MetricsSmartArgs
} else {

@ -1,38 +1,39 @@
package config_test
import (
"path"
"testing"
"github.com/analogj/scrutiny/collector/pkg/config"
"github.com/analogj/scrutiny/collector/pkg/models"
"github.com/stretchr/testify/require"
"path"
"testing"
)
func TestConfiguration_InvalidConfigPath(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig("does_not_exist.yaml")
//assert
// assert
require.Error(t, err, "should return an error")
}
func TestConfiguration_GetScanOverrides_Simple(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "simple_device.yaml"))
require.NoError(t, err, "should correctly load simple device config")
scanOverrides := testConfig.GetDeviceOverrides()
//assert
// assert
require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat"}, Ignore: false}}, scanOverrides)
}
@ -40,15 +41,15 @@ func TestConfiguration_GetScanOverrides_Simple(t *testing.T) {
func TestConfiguration_GetScanOverrides_DeviceTypeComma(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "device_type_comma.yaml"))
require.NoError(t, err, "should correctly load simple device config")
scanOverrides := testConfig.GetDeviceOverrides()
//assert
// assert
require.Equal(t, []models.ScanOverride{
{Device: "/dev/sda", DeviceType: []string{"sat", "auto"}, Ignore: false},
{Device: "/dev/sdb", DeviceType: []string{"sat,auto"}, Ignore: false},
@ -58,30 +59,30 @@ func TestConfiguration_GetScanOverrides_DeviceTypeComma(t *testing.T) {
func TestConfiguration_GetScanOverrides_Ignore(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "ignore_device.yaml"))
require.NoError(t, err, "should correctly load ignore device config")
scanOverrides := testConfig.GetDeviceOverrides()
//assert
// assert
require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}}, scanOverrides)
}
func TestConfiguration_GetScanOverrides_Raid(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "raid_device.yaml"))
require.NoError(t, err, "should correctly load ignore device config")
scanOverrides := testConfig.GetDeviceOverrides()
//assert
// assert
require.Equal(t, []models.ScanOverride{
{
Device: "/dev/bus/0",
@ -92,16 +93,17 @@ func TestConfiguration_GetScanOverrides_Raid(t *testing.T) {
Device: "/dev/twa0",
DeviceType: []string{"3ware,0", "3ware,1", "3ware,2", "3ware,3", "3ware,4", "3ware,5"},
Ignore: false,
}}, scanOverrides)
},
}, scanOverrides)
}
func TestConfiguration_InvalidCommands_MissingJson(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "invalid_commands_missing_json.yaml"))
require.EqualError(t, err, `ConfigValidationError: "configuration key 'commands.metrics_scan_args' is missing '--json' flag"`, "should throw an error because json flag is missing")
}
@ -109,10 +111,10 @@ func TestConfiguration_InvalidCommands_MissingJson(t *testing.T) {
func TestConfiguration_InvalidCommands_IncludesDevice(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "invalid_commands_includes_device.yaml"))
require.EqualError(t, err, `ConfigValidationError: "configuration key 'commands.metrics_info_args' must not contain '--device' or '-d' flag, configuration key 'commands.metrics_smart_args' must not contain '--device' or '-d' flag"`, "should throw an error because device flags detected")
}
@ -120,10 +122,10 @@ func TestConfiguration_InvalidCommands_IncludesDevice(t *testing.T) {
func TestConfiguration_OverrideCommands(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "override_commands.yaml"))
require.NoError(t, err, "should not throw an error")
require.Equal(t, "--xall --json -T permissive", testConfig.GetString("commands.metrics_smart_args"))
@ -132,15 +134,15 @@ func TestConfiguration_OverrideCommands(t *testing.T) {
func TestConfiguration_OverrideDeviceCommands_MetricsInfoArgs(t *testing.T) {
t.Parallel()
//setup
// setup
testConfig, _ := config.Create()
//test
// test
err := testConfig.ReadConfig(path.Join("testdata", "override_device_commands.yaml"))
require.NoError(t, err, "should correctly override device command")
//assert
// assert
require.Equal(t, "--info --json -T permissive", testConfig.GetCommandMetricsInfoArgs("/dev/sda"))
require.Equal(t, "--info --json", testConfig.GetCommandMetricsInfoArgs("/dev/sdb"))
//require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Commands: {MetricsInfoArgs: "--info --json -T "}}}, scanOverrides)
// require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Commands: {MetricsInfoArgs: "--info --json -T "}}}, scanOverrides)
}

@ -19,7 +19,7 @@ type Detect struct {
Shell shell.Interface
}
//private/common functions
// private/common functions
// This function calls smartctl --scan which can be used to detect storage devices.
// It has a couple of issues however:
@ -28,7 +28,7 @@ type Detect struct {
// To handle these issues, we have OS specific wrapper functions that update/modify these detected devices.
// models.Device returned from this function only contain the minimum data for smartctl to execute: device type and device name (device file).
func (d *Detect) SmartctlScan() ([]models.Device, error) {
//we use smartctl to detect all the drives available.
// we use smartctl to detect all the drives available.
args := strings.Split(d.Config.GetString("commands.metrics_scan_args"), " ")
detectedDeviceConnJson, err := d.Shell.Command(d.Logger, d.Config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
if err != nil {
@ -55,7 +55,7 @@ func (d *Detect) SmartctlScan() ([]models.Device, error) {
func (d *Detect) SmartCtlInfo(device *models.Device) error {
fullDeviceName := fmt.Sprintf("%s%s", DevicePrefix(), device.DeviceName)
args := strings.Split(d.Config.GetCommandMetricsInfoArgs(fullDeviceName), " ")
//only include the device type if its a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
// only include the device type if its a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
if len(device.DeviceType) > 0 && device.DeviceType != "scsi" && device.DeviceType != "ata" {
args = append(args, "--device", device.DeviceType)
}
@ -74,9 +74,9 @@ func (d *Detect) SmartCtlInfo(device *models.Device) error {
return err
}
//WWN: this is a serial number/world-wide number that will not change.
//DeviceType and DeviceName are already populated, however may change between collector runs (eg. config/host restart)
//InterfaceType:
// WWN: this is a serial number/world-wide number that will not change.
// DeviceType and DeviceName are already populated, however may change between collector runs (eg. config/host restart)
// InterfaceType:
device.ModelName = availableDeviceInfo.ModelName
device.InterfaceSpeed = availableDeviceInfo.InterfaceSpeed.Current.String
device.SerialNumber = availableDeviceInfo.SerialNumber
@ -90,8 +90,8 @@ func (d *Detect) SmartCtlInfo(device *models.Device) error {
device.Manufacturer = availableDeviceInfo.Vendor
}
//populate WWN is possible if present
if availableDeviceInfo.Wwn.Naa != 0 { //valid values are 1-6 (5 is what we handle correctly)
// populate WWN is possible if present
if availableDeviceInfo.Wwn.Naa != 0 { // valid values are 1-6 (5 is what we handle correctly)
d.Logger.Info("Generating WWN")
wwn := Wwn{
Naa: availableDeviceInfo.Wwn.Naa,
@ -130,7 +130,7 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod
DeviceName: strings.TrimPrefix(deviceFile, DevicePrefix()),
}
//find (or create) a slice to contain the devices in this group
// find (or create) a slice to contain the devices in this group
if groupedDevices[deviceFile] == nil {
groupedDevices[deviceFile] = []models.Device{}
}
@ -139,7 +139,7 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod
groupedDevices[deviceFile] = append(groupedDevices[deviceFile], detectedDevice)
}
//now tha we've "grouped" all the devices, lets override any groups specified in the config file.
// now tha we've "grouped" all the devices, lets override any groups specified in the config file.
for _, overrideDevice := range d.Config.GetDeviceOverrides() {
overrideDeviceFile := strings.ToLower(overrideDevice.Device)
@ -148,7 +148,7 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod
// this device file should be deleted if it exists
delete(groupedDevices, overrideDeviceFile)
} else {
//create a new device group, and replace the one generated by smartctl --scan
// create a new device group, and replace the one generated by smartctl --scan
overrideDeviceGroup := []models.Device{}
if overrideDevice.DeviceType != nil {
@ -160,20 +160,19 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod
})
}
} else {
//user may have specified device in config file without device type (default to scanned device type)
// user may have specified device in config file without device type (default to scanned device type)
//check if the device file was detected by the scanner
// check if the device file was detected by the scanner
var deviceType string
if scannedDevice, foundScannedDevice := groupedDevices[overrideDeviceFile]; foundScannedDevice {
if len(scannedDevice) > 0 {
//take the device type from the first grouped device
// take the device type from the first grouped device
deviceType = scannedDevice[0].DeviceType
} else {
deviceType = "ata"
}
} else {
//fallback to ata if no scanned device detected
// fallback to ata if no scanned device detected
deviceType = "ata"
}
@ -188,7 +187,7 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod
}
}
//flatten map
// flatten map
detectedDevices := []models.Device{}
for _, group := range groupedDevices {
detectedDevices = append(detectedDevices, group...)

@ -1,10 +1,11 @@
package detect
import (
"strings"
"github.com/analogj/scrutiny/collector/pkg/common/shell"
"github.com/analogj/scrutiny/collector/pkg/models"
"github.com/jaypipes/ghw"
"strings"
)
func DevicePrefix() string {
@ -19,22 +20,21 @@ func (d *Detect) Start() ([]models.Device, error) {
return nil, err
}
//smartctl --scan doesn't seem to detect mac nvme drives, lets see if we can detect them manually.
missingDevices, err := d.findMissingDevices(detectedDevices) //we dont care about the error here, just continue retrieving device info.
// smartctl --scan doesn't seem to detect mac nvme drives, lets see if we can detect them manually.
missingDevices, err := d.findMissingDevices(detectedDevices) // we dont care about the error here, just continue retrieving device info.
if err == nil {
detectedDevices = append(detectedDevices, missingDevices...)
}
//inflate device info for detected devices.
for ndx, _ := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
// inflate device info for detected devices.
for ndx := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) // ignore errors.
}
return detectedDevices, nil
}
func (d *Detect) findMissingDevices(detectedDevices []models.Device) ([]models.Device, error) {
missingDevices := []models.Device{}
block, err := ghw.Block()
@ -69,11 +69,10 @@ func (d *Detect) findMissingDevices(detectedDevices []models.Device) ([]models.D
continue
}
//check if device is already detected.
// check if device is already detected.
alreadyDetected := false
diskName := strings.TrimPrefix(disk.Name, DevicePrefix())
for _, detectedDevice := range detectedDevices {
if detectedDevice.DeviceName == diskName {
alreadyDetected = true
break
@ -89,7 +88,7 @@ func (d *Detect) findMissingDevices(detectedDevices []models.Device) ([]models.D
return missingDevices, nil
}
//WWN values NVMe and SCSI
// WWN values NVMe and SCSI
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
block, err := ghw.Block()
if err == nil {
@ -102,12 +101,12 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
}
}
//no WWN found, or could not open Block devices. Either way, fallback to serial number
// no WWN found, or could not open Block devices. Either way, fallback to serial number
if len(detectedDevice.WWN) == 0 {
d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
detectedDevice.WWN = detectedDevice.SerialNumber
}
//wwn must always be lowercase.
// wwn must always be lowercase.
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}

@ -1,10 +1,11 @@
package detect
import (
"strings"
"github.com/analogj/scrutiny/collector/pkg/common/shell"
"github.com/analogj/scrutiny/collector/pkg/models"
"github.com/jaypipes/ghw"
"strings"
)
func DevicePrefix() string {
@ -19,15 +20,15 @@ func (d *Detect) Start() ([]models.Device, error) {
return nil, err
}
//inflate device info for detected devices.
for ndx, _ := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
// inflate device info for detected devices.
for ndx := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) // ignore errors.
}
return detectedDevices, nil
}
//WWN values NVMe and SCSI
// WWN values NVMe and SCSI
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
block, err := ghw.Block()
if err == nil {
@ -40,12 +41,12 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
}
}
//no WWN found, or could not open Block devices. Either way, fallback to serial number
// no WWN found, or could not open Block devices. Either way, fallback to serial number
if len(detectedDevice.WWN) == 0 {
d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
detectedDevice.WWN = detectedDevice.SerialNumber
}
//wwn must always be lowercase.
// wwn must always be lowercase.
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}

@ -2,12 +2,13 @@ package detect
import (
"fmt"
"github.com/analogj/scrutiny/collector/pkg/common/shell"
"github.com/analogj/scrutiny/collector/pkg/models"
"github.com/jaypipes/ghw"
"io/ioutil"
"path/filepath"
"strings"
"github.com/analogj/scrutiny/collector/pkg/common/shell"
"github.com/analogj/scrutiny/collector/pkg/models"
"github.com/jaypipes/ghw"
)
func DevicePrefix() string {
@ -22,16 +23,16 @@ func (d *Detect) Start() ([]models.Device, error) {
return nil, err
}
//inflate device info for detected devices.
for ndx, _ := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
populateUdevInfo(&detectedDevices[ndx]) //ignore errors.
// inflate device info for detected devices.
for ndx := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) // ignore errors.
populateUdevInfo(&detectedDevices[ndx]) // ignore errors.
}
return detectedDevices, nil
}
//WWN values NVMe and SCSI
// WWN values NVMe and SCSI
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
block, err := ghw.Block()
if err == nil {
@ -44,13 +45,13 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
}
}
//no WWN found, or could not open Block devices. Either way, fallback to serial number
// no WWN found, or could not open Block devices. Either way, fallback to serial number
if len(detectedDevice.WWN) == 0 {
d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
detectedDevice.WWN = detectedDevice.SerialNumber
}
//wwn must always be lowercase.
// wwn must always be lowercase.
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}
@ -86,7 +87,7 @@ func populateUdevInfo(detectedDevice *models.Device) error {
}
}
//Set additional device information.
// Set additional device information.
if deviceLabel, exists := udevInfo["ID_FS_LABEL"]; exists {
detectedDevice.DeviceLabel = deviceLabel
}
@ -97,7 +98,5 @@ func populateUdevInfo(detectedDevice *models.Device) error {
detectedDevice.DeviceSerialID = fmt.Sprintf("%s-%s", udevInfo["ID_BUS"], deviceSerialID)
}
return nil
}

@ -1,16 +1,17 @@
package detect_test
import (
"testing"
"github.com/analogj/scrutiny/collector/pkg/detect"
"github.com/stretchr/testify/require"
"testing"
)
func TestDevicePrefix(t *testing.T) {
//setup
// setup
//test
// test
//assert
// assert
require.Equal(t, "/dev/", detect.DevicePrefix())
}

@ -1,9 +1,10 @@
package detect
import (
"strings"
"github.com/analogj/scrutiny/collector/pkg/common/shell"
"github.com/analogj/scrutiny/collector/pkg/models"
"strings"
)
func DevicePrefix() string {
@ -18,22 +19,21 @@ func (d *Detect) Start() ([]models.Device, error) {
return nil, err
}
//inflate device info for detected devices.
for ndx, _ := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
// inflate device info for detected devices.
for ndx := range detectedDevices {
d.SmartCtlInfo(&detectedDevices[ndx]) // ignore errors.
}
return detectedDevices, nil
}
//WWN values NVMe and SCSI
// WWN values NVMe and SCSI
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
//fallback to serial number
// fallback to serial number
if len(detectedDevice.WWN) == 0 {
detectedDevice.WWN = detectedDevice.SerialNumber
}
//wwn must always be lowercase.
// wwn must always be lowercase.
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}

@ -46,14 +46,13 @@ type Wwn struct {
*/
func (wwn *Wwn) ToString() string {
var wwnBuffer uint64
wwnBuffer = wwn.Id //start with vendor ID
wwnBuffer += (wwn.Oui << 36) //add left-shifted OUI
wwnBuffer += (wwn.Naa << 60) //NAA is a number from 1-6, so decimal == hex.
wwnBuffer = wwn.Id // start with vendor ID
wwnBuffer += (wwn.Oui << 36) // add left-shifted OUI
wwnBuffer += (wwn.Naa << 60) // NAA is a number from 1-6, so decimal == hex.
//TODO: may need to support additional versions in the future.
// TODO: may need to support additional versions in the future.
return strings.ToLower(fmt.Sprintf("%#x", wwnBuffer))
}
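`Wwn.ToString` above packs the NAA, OUI, and vendor ID fields into one 64-bit value. A quick standalone check of that arithmetic, using the `/dev/sda` row from the test table that follows (plain integers, not the project's `Wwn` type):

```go
package main

import "fmt"

func main() {
	// Layout used by ToString: NAA in the top 4 bits, the OUI shifted left
	// by 36, and the vendor ID in the low 36 bits.
	var naa, oui, id uint64 = 5, 9528, 61213911380 // /dev/sda values from the test table below

	wwn := id + (oui << 36) + (naa << 60)
	fmt.Printf("%#x\n", wwn) // 0x5002538e40a22954
}
```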

@ -2,28 +2,27 @@ package detect_test
import (
"fmt"
"testing"
"github.com/analogj/scrutiny/collector/pkg/detect"
"github.com/stretchr/testify/require"
"testing"
)
func TestWwn_FromStringTable(t *testing.T) {
//setup
var tests = []struct {
// setup
tests := []struct {
wwnStr string
wwn detect.Wwn
}{
{"0x5002538e40a22954", detect.Wwn{Naa: 5, Oui: 9528, Id: 61213911380}}, //sda
{"0x5000cca264eb01d7", detect.Wwn{Naa: 5, Oui: 3274, Id: 10283057623}}, //sdb
{"0x5000cca264ec3183", detect.Wwn{Naa: 5, Oui: 3274, Id: 10283135363}}, //sdc
{"0x5000cca252c859cc", detect.Wwn{Naa: 5, Oui: 3274, Id: 9978796492}}, //sdd
{"0x50014ee20b2a72a9", detect.Wwn{Naa: 5, Oui: 5358, Id: 8777265833}}, //sde
{"0x5000cca264ebc248", detect.Wwn{Naa: 5, Oui: 3274, Id: 10283106888}}, //sdf
{"0x5000c500673e6b5f", detect.Wwn{Naa: 5, Oui: 3152, Id: 1732143967}}, //sdg
{"0x5002538e40a22954", detect.Wwn{Naa: 5, Oui: 9528, Id: 61213911380}}, // sda
{"0x5000cca264eb01d7", detect.Wwn{Naa: 5, Oui: 3274, Id: 10283057623}}, // sdb
{"0x5000cca264ec3183", detect.Wwn{Naa: 5, Oui: 3274, Id: 10283135363}}, // sdc
{"0x5000cca252c859cc", detect.Wwn{Naa: 5, Oui: 3274, Id: 9978796492}}, // sdd
{"0x50014ee20b2a72a9", detect.Wwn{Naa: 5, Oui: 5358, Id: 8777265833}}, // sde
{"0x5000cca264ebc248", detect.Wwn{Naa: 5, Oui: 3274, Id: 10283106888}}, // sdf
{"0x5000c500673e6b5f", detect.Wwn{Naa: 5, Oui: 3152, Id: 1732143967}}, // sdg
}
//test
// test
for _, tt := range tests {
testname := fmt.Sprintf("%s", tt.wwnStr)
t.Run(testname, func(t *testing.T) {
@ -31,5 +30,4 @@ func TestWwn_FromStringTable(t *testing.T) {
require.Equal(t, tt.wwnStr, str)
})
}
}

@ -1,9 +1,10 @@
package errors_test
import (
"testing"
"github.com/analogj/scrutiny/collector/pkg/errors"
"github.com/stretchr/testify/require"
"testing"
)
//func TestCheckErr_WithoutError(t *testing.T) {
@ -27,7 +28,7 @@ import (
func TestErrors(t *testing.T) {
t.Parallel()
//assert
// assert
require.Implements(t, (*error)(nil), errors.ConfigFileMissingError("test"), "should implement the error interface")
require.Implements(t, (*error)(nil), errors.ConfigValidationError("test"), "should implement the error interface")
require.Implements(t, (*error)(nil), errors.DependencyMissingError("test"), "should implement the error interface")

@ -4,9 +4,9 @@ type Device struct {
WWN string `json:"wwn"`
DeviceName string `json:"device_name"`
DeviceUUID string `json:"device_uuid"`
DeviceSerialID string `json:"device_serial_id"`
DeviceLabel string `json:"device_label"`
DeviceUUID string `json:"device_uuid"`
DeviceSerialID string `json:"device_serial_id"`
DeviceLabel string `json:"device_label"`
Manufacturer string `json:"manufacturer"`
ModelName string `json:"model_name"`
@ -18,8 +18,8 @@ type Device struct {
Capacity int64 `json:"capacity"`
FormFactor string `json:"form_factor"`
SmartSupport bool `json:"smart_support"`
DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` //device type is used for querying with -d/t flag, should only be used by collector.
DeviceProtocol string `json:"device_protocol"` // protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` // device type is used for querying with -d/t flag, should only be used by collector.
// User provided metadata
Label string `json:"label"`

@ -18,6 +18,7 @@ require (
github.com/spf13/viper v1.14.0
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.2.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.1.0
gorm.io/gorm v1.23.5
)

@ -650,6 +650,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
@ -762,6 +763,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=

@ -3,26 +3,28 @@ package main
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
"github.com/analogj/scrutiny/webapp/backend/pkg/web"
"github.com/sirupsen/logrus"
"io"
"log"
"os"
"time"
utils "github.com/analogj/go-util/utils"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
)
var goos string
var goarch string
var (
goos string
goarch string
)
func main() {
config, err := config.Create()
if err != nil {
fmt.Printf("FATAL: %+v\n", err)
@ -35,10 +37,10 @@ func main() {
configFilePath = configFilePathAlternative
}
//we're going to load the config file manually, since we need to validate it.
err = config.ReadConfig(configFilePath) // Find and read the config file
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
//ignore "could not find config file"
// we're going to load the config file manually, since we need to validate it.
err = config.ReadConfig(configFilePath) // Find and read the config file
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
// ignore "could not find config file"
} else if err != nil {
log.Print(color.HiRedString("CONFIG ERROR: %v", err))
os.Exit(1)
@ -69,7 +71,6 @@ OPTIONS:
},
},
Before: func(c *cli.Context) error {
scrutiny := "github.com/AnalogJ/scrutiny"
var versionInfo string
@ -103,7 +104,7 @@ OPTIONS:
if c.IsSet("config") {
err = config.ReadConfig(c.String("config")) // Find and read the config file
if err != nil { // Handle errors reading the config file
//ignore "could not find config file"
// ignore "could not find config file"
fmt.Printf("Could not find config file at specified path: %s", c.String("config"))
return err
}
@ -159,14 +160,13 @@ OPTIONS:
if err != nil {
log.Fatal(color.HiRedString("ERROR: %v", err))
}
}
func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
logger := logrus.WithFields(logrus.Fields{
"type": "web",
})
//set default log level
// set default log level
if level, err := logrus.ParseLevel(appConfig.GetString("log.level")); err == nil {
logger.Logger.SetLevel(level)
} else {
@ -176,7 +176,7 @@ func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
var logFile *os.File
var err error
if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
return nil, logFile, err

@ -1,12 +1,13 @@
package config
import (
"github.com/analogj/go-util/utils"
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
"github.com/spf13/viper"
"log"
"os"
"strings"
"github.com/analogj/go-util/utils"
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
"github.com/spf13/viper"
)
const DB_USER_SETTINGS_SUBKEY = "user"
@ -19,7 +20,7 @@ type configuration struct {
*viper.Viper
}
//Viper uses the following precedence order. Each item takes precedence over the item below it:
// Viper uses the following precedence order. Each item takes precedence over the item below it:
// explicit call to Set
// flag
// env
@ -29,7 +30,7 @@ type configuration struct {
func (c *configuration) Init() error {
c.Viper = viper.New()
//set defaults
// set defaults
c.SetDefault("web.listen.port", "8080")
c.SetDefault("web.listen.host", "0.0.0.0")
c.SetDefault("web.listen.basepath", "")
@ -52,20 +53,20 @@ func (c *configuration) Init() error {
c.SetDefault("web.influxdb.tls.insecure_skip_verify", false)
c.SetDefault("web.influxdb.retention_policy", true)
//c.SetDefault("disks.include", []string{})
//c.SetDefault("disks.exclude", []string{})
// c.SetDefault("disks.include", []string{})
// c.SetDefault("disks.exclude", []string{})
//if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
// if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
c.SetConfigType("yaml")
//c.SetConfigName("drawbridge")
//c.AddConfigPath("$HOME/")
// c.SetConfigName("drawbridge")
// c.AddConfigPath("$HOME/")
//configure env variable parsing.
// configure env variable parsing.
c.SetEnvPrefix("SCRUTINY")
c.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
c.AutomaticEnv()
//CLI options will be added via the `Set()` function
// CLI options will be added via the `Set()` function
return c.ValidateConfig()
}
@ -81,7 +82,7 @@ func (c *configuration) Sub(key string) Interface {
}
func (c *configuration) ReadConfig(configFilePath string) error {
//make sure that we specify that this is the correct config path (for eventual WriteConfig() calls)
// make sure that we specify that this is the correct config path (for eventual WriteConfig() calls)
c.SetConfigFile(configFilePath)
configFilePath, err := utils.ExpandPath(configFilePath)
@ -119,7 +120,6 @@ func (c *configuration) ReadConfig(configFilePath string) error {
// This function ensures that the merged config works correctly.
func (c *configuration) ValidateConfig() error {
//the following keys are deprecated, and no longer supported
/*
- notify.filter_attributes (replaced by metrics.status.filter_attributes SETTING)

@ -1,13 +1,14 @@
package config
import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"testing"
)
func Test_MergeConfigMap(t *testing.T) {
//setup
// setup
testConfig := configuration{
Viper: viper.New(),
}
@ -20,15 +21,14 @@ func Test_MergeConfigMap(t *testing.T) {
"layout": "layout",
},
}
//test
// test
err := testConfig.MergeConfigMap(mergeSettings)
//verify
// verify
require.NoError(t, err)
// if using Set, the MergeConfigMap functionality will not override
// if using SetDefault, the MergeConfigMap will override correctly
require.Equal(t, "hello", testConfig.GetString("user.dashboard_display"))
require.Equal(t, "layout", testConfig.GetString("user.layout"))
}

@ -1,11 +1,14 @@
package pkg
const DeviceProtocolAta = "ATA"
const DeviceProtocolScsi = "SCSI"
const DeviceProtocolNvme = "NVMe"
const (
DeviceProtocolAta = "ATA"
DeviceProtocolScsi = "SCSI"
DeviceProtocolNvme = "NVMe"
)
//go:generate stringer -type=AttributeStatus
// AttributeStatus bitwise flag, 1,2,4,8,16,32,etc
//
//go:generate stringer -type=AttributeStatus
type AttributeStatus uint8
const (
@ -15,16 +18,19 @@ const (
AttributeStatusFailedScrutiny AttributeStatus = 4
)
const AttributeWhenFailedFailingNow = "FAILING_NOW"
const AttributeWhenFailedInThePast = "IN_THE_PAST"
const (
AttributeWhenFailedFailingNow = "FAILING_NOW"
AttributeWhenFailedInThePast = "IN_THE_PAST"
)
func AttributeStatusSet(b, flag AttributeStatus) AttributeStatus { return b | flag }
func AttributeStatusClear(b, flag AttributeStatus) AttributeStatus { return b &^ flag }
func AttributeStatusToggle(b, flag AttributeStatus) AttributeStatus { return b ^ flag }
func AttributeStatusHas(b, flag AttributeStatus) bool { return b&flag != 0 }
//go:generate stringer -type=DeviceStatus
// DeviceStatus bitwise flag, 1,2,4,8,16,32,etc
//
//go:generate stringer -type=DeviceStatus
type DeviceStatus uint8
const (
@ -60,6 +66,6 @@ const (
MetricsStatusThresholdSmart MetricsStatusThreshold = 1
MetricsStatusThresholdScrutiny MetricsStatusThreshold = 2
//shortcut
// shortcut
MetricsStatusThresholdBoth MetricsStatusThreshold = 3
)
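The constants above are bitwise flags manipulated through the small `AttributeStatusSet`/`AttributeStatusHas` helpers shown in the same hunk. A hedged, standalone usage sketch (the type and helpers are re-declared locally; only `AttributeStatusFailedScrutiny = 4` is visible in this diff, so the other value is an assumption for illustration):

```go
package main

import "fmt"

// Local copy of the bitwise-flag pattern; the real declarations live in
// webapp/backend/pkg as shown in the hunk above.
type AttributeStatus uint8

const (
	AttributeStatusFailedSmart    AttributeStatus = 1 // assumed value, not shown in this diff
	AttributeStatusFailedScrutiny AttributeStatus = 4 // value shown in the hunk above
)

func AttributeStatusSet(b, flag AttributeStatus) AttributeStatus { return b | flag }
func AttributeStatusHas(b, flag AttributeStatus) bool            { return b&flag != 0 }

func main() {
	var status AttributeStatus // zero value: no failure flags set

	// Flags are independent bits, so both failure types can be recorded at once.
	status = AttributeStatusSet(status, AttributeStatusFailedScrutiny)

	fmt.Println(AttributeStatusHas(status, AttributeStatusFailedScrutiny)) // true
	fmt.Println(AttributeStatusHas(status, AttributeStatusFailedSmart))    // false
}
```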

@ -23,9 +23,9 @@ type DeviceRepo interface {
DeleteDevice(ctx context.Context, wwn string) error
SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error)
GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error)
GetSmartAttributeHistory(ctx context.Context, wwn, durationKey string, selectEntries, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error)
SaveSmartTemperature(ctx context.Context, wwn string, deviceProtocol string, collectorSmartData collector.SmartInfo) error
SaveSmartTemperature(ctx context.Context, wwn, deviceProtocol string, collectorSmartData collector.SmartInfo) error
GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error)
GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error)

@ -6,7 +6,7 @@ import (
// Deprecated: m20201107210306.Device is deprecated, only used by db migrations
type Device struct {
//GORM attributes, see: http://gorm.io/docs/conventions.html
// GORM attributes, see: http://gorm.io/docs/conventions.html
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt *time.Time
@ -25,14 +25,16 @@ type Device struct {
Capacity int64 `json:"capacity"`
FormFactor string `json:"form_factor"`
SmartSupport bool `json:"smart_support"`
DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` //device type is used for querying with -d/t flag, should only be used by collector.
DeviceProtocol string `json:"device_protocol"` // protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` // device type is used for querying with -d/t flag, should only be used by collector.
SmartResults []Smart `gorm:"foreignkey:DeviceWWN" json:"smart_results"`
}
const DeviceProtocolAta = "ATA"
const DeviceProtocolScsi = "SCSI"
const DeviceProtocolNvme = "NVMe"
const (
DeviceProtocolAta = "ATA"
DeviceProtocolScsi = "SCSI"
DeviceProtocolNvme = "NVMe"
)
func (dv *Device) IsAta() bool {
return dv.DeviceProtocol == DeviceProtocolAta

@ -1,8 +1,9 @@
package m20201107210306
import (
"gorm.io/gorm"
"time"
"gorm.io/gorm"
)
// Deprecated: m20201107210306.Smart is deprecated, only used by db migrations
@ -15,7 +16,7 @@ type Smart struct {
TestDate time.Time `json:"date"`
SmartStatus string `json:"smart_status"` // SmartStatusPassed or SmartStatusFailed
//Metrics
// Metrics
Temp int64 `json:"temp"`
PowerOnHours int64 `json:"power_on_hours"`
PowerCycleCount int64 `json:"power_cycle_count"`

@ -9,7 +9,7 @@ type SmartNvmeAttribute struct {
SmartId int `json:"smart_id"`
Smart Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key
AttributeId string `json:"attribute_id"` //json string from smartctl
AttributeId string `json:"attribute_id"` // json string from smartctl
Name string `json:"name"`
Value int `json:"value"`
Threshold int `json:"thresh"`

@ -9,7 +9,7 @@ type SmartScsiAttribute struct {
SmartId int `json:"smart_id"`
Smart Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key
AttributeId string `json:"attribute_id"` //json string from smartctl
AttributeId string `json:"attribute_id"` // json string from smartctl
Name string `json:"name"`
Value int `json:"value"`
Threshold int `json:"thresh"`

@ -1,13 +1,14 @@
package m20220503120000
import (
"github.com/analogj/scrutiny/webapp/backend/pkg"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg"
)
// Deprecated: m20220503120000.Device is deprecated, only used by db migrations
type Device struct {
//GORM attributes, see: http://gorm.io/docs/conventions.html
// GORM attributes, see: http://gorm.io/docs/conventions.html
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt *time.Time
@ -25,8 +26,8 @@ type Device struct {
Capacity int64 `json:"capacity"`
FormFactor string `json:"form_factor"`
SmartSupport bool `json:"smart_support"`
DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` //device type is used for querying with -d/t flag, should only be used by collector.
DeviceProtocol string `json:"device_protocol"` // protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` // device type is used for querying with -d/t flag, should only be used by collector.
// User provided metadata
Label string `json:"label"`

@ -1,12 +1,13 @@
package m20220509170100
import (
"github.com/analogj/scrutiny/webapp/backend/pkg"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg"
)
type Device struct {
//GORM attributes, see: http://gorm.io/docs/conventions.html
// GORM attributes, see: http://gorm.io/docs/conventions.html
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt *time.Time
@ -14,9 +15,9 @@ type Device struct {
WWN string `json:"wwn" gorm:"primary_key"`
DeviceName string `json:"device_name"`
DeviceUUID string `json:"device_uuid"`
DeviceSerialID string `json:"device_serial_id"`
DeviceLabel string `json:"device_label"`
DeviceUUID string `json:"device_uuid"`
DeviceSerialID string `json:"device_serial_id"`
DeviceLabel string `json:"device_label"`
Manufacturer string `json:"manufacturer"`
ModelName string `json:"model_name"`
@ -28,8 +29,8 @@ type Device struct {
Capacity int64 `json:"capacity"`
FormFactor string `json:"form_factor"`
SmartSupport bool `json:"smart_support"`
DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` //device type is used for querying with -d/t flag, should only be used by collector.
DeviceProtocol string `json:"device_protocol"` // protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` // device type is used for querying with -d/t flag, should only be used by collector.
// User provided metadata
Label string `json:"label"`
@ -38,4 +39,3 @@ type Device struct {
// Data set by Scrutiny
DeviceStatus pkg.DeviceStatus `json:"device_status"`
}

@ -5,7 +5,7 @@ import (
)
type Setting struct {
//GORM attributes, see: http://gorm.io/docs/conventions.html
// GORM attributes, see: http://gorm.io/docs/conventions.html
gorm.Model
SettingKeyName string `json:"setting_key_name"`

@ -5,6 +5,11 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
"github.com/glebarez/sqlite"
@ -13,10 +18,6 @@ import (
"github.com/influxdata/influxdb-client-go/v2/domain"
"github.com/sirupsen/logrus"
"gorm.io/gorm"
"io/ioutil"
"net/http"
"net/url"
"time"
)
const (
@ -77,8 +78,8 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
"busy_timeout": "30000",
})
database, err := gorm.Open(sqlite.Open(appConfig.GetString("web.database.location")+pragmaStr), &gorm.Config{
//TODO: figure out how to log database queries again.
//Logger: logger
// TODO: figure out how to log database queries again.
// Logger: logger
DisableForeignKeyConstraintWhenMigrating: true,
})
if err != nil {
@ -86,7 +87,7 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
}
globalLogger.Infof("Successfully connected to scrutiny sqlite db: %s\n", appConfig.GetString("web.database.location"))
//database.SetLogger()
// database.SetLogger()
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// InfluxDB setup
@ -107,7 +108,7 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
influxdb2.DefaultOptions().SetTLSConfig(tlsConfig),
)
//if !appConfig.IsSet("web.influxdb.token") {
// if !appConfig.IsSet("web.influxdb.token") {
globalLogger.Debugf("Determine Influxdb setup status...")
influxSetupComplete, err := InfluxSetupComplete(influxdbUrl, tlsConfig)
if err != nil {
@ -206,7 +207,7 @@ func (sr *scrutinyRepository) Close() error {
}
func (sr *scrutinyRepository) HealthCheck(ctx context.Context) error {
//check influxdb
// check influxdb
status, err := sr.influxClient.Health(ctx)
if err != nil {
return fmt.Errorf("influxdb healthcheck failed: %w", err)
@ -215,7 +216,7 @@ func (sr *scrutinyRepository) HealthCheck(ctx context.Context) error {
return fmt.Errorf("influxdb healthcheckf failed: status=%s", status.Status)
}
//check sqlite db.
// check sqlite db.
database, err := sr.gormClient.DB()
if err != nil {
return fmt.Errorf("sqlite healthcheck failed: %w", err)
@ -225,7 +226,6 @@ func (sr *scrutinyRepository) HealthCheck(ctx context.Context) error {
return fmt.Errorf("sqlite healthcheck failed during ping: %w", err)
}
return nil
}
func InfluxSetupComplete(influxEndpoint string, tlsConfig *tls.Config) (bool, error) {
@ -238,7 +238,7 @@ func InfluxSetupComplete(influxEndpoint string, tlsConfig *tls.Config) (bool, er
return false, err
}
client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
res, err := client.Get(influxUri.String())
if err != nil {
return false, err
@ -261,7 +261,6 @@ func InfluxSetupComplete(influxEndpoint string, tlsConfig *tls.Config) (bool, er
}
func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Organization) error {
var mainBucketRetentionRule domain.RetentionRule
var weeklyBucketRetentionRule domain.RetentionRule
var monthlyBucketRetentionRule domain.RetentionRule
@ -282,12 +281,12 @@ func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Org
return err
}
} else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
//correctly set the retention period for the main bucket (can't do it during setup/creation)
// correctly set the retention period for the main bucket (can't do it during setup/creation)
foundMainBucket.RetentionRules = domain.RetentionRules{mainBucketRetentionRule}
sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundMainBucket)
}
//create buckets (used for downsampling)
// create buckets (used for downsampling)
weeklyBucket := fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))
if foundWeeklyBucket, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, weeklyBucket); foundErr != nil {
// metrics_weekly bucket will have a retention period of 8+1 weeks (since it will be down-sampled once a month)
@ -296,7 +295,7 @@ func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Org
return err
}
} else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
//correctly set the retention period for the bucket (may not be able to do it during setup/creation)
// correctly set the retention period for the bucket (may not be able to do it during setup/creation)
foundWeeklyBucket.RetentionRules = domain.RetentionRules{weeklyBucketRetentionRule}
sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundWeeklyBucket)
}
@ -309,7 +308,7 @@ func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Org
return err
}
} else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
//correctly set the retention period for the bucket (may not be able to do it during setup/creation)
// correctly set the retention period for the bucket (may not be able to do it during setup/creation)
foundMonthlyBucket.RetentionRules = domain.RetentionRules{monthlyBucketRetentionRule}
sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundMonthlyBucket)
}
@ -344,7 +343,7 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
}
// Get parser flux query result
//appConfig.GetString("web.influxdb.bucket")
// appConfig.GetString("web.influxdb.bucket")
queryStr := fmt.Sprintf(`
import "influxdata/influxdb/schema"
bucketBaseName = "%s"
@ -396,15 +395,15 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
for result.Next() {
// Observe when there is new grouping key producing new table
if result.TableChanged() {
//fmt.Printf("table: %s\n", result.TableMetadata().String())
// fmt.Printf("table: %s\n", result.TableMetadata().String())
}
// read result
//get summary data from Influxdb.
//result.Record().Values()
// get summary data from Influxdb.
// result.Record().Values()
if deviceWWN, ok := result.Record().Values()["device_wwn"]; ok {
//ensure summaries is initialized for this wwn
// ensure summaries is initialized for this wwn
if _, exists := summaries[deviceWWN.(string)]; !exists {
summaries[deviceWWN.(string)] = &models.DeviceSummary{}
}
@ -446,7 +445,7 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
func (sr *scrutinyRepository) lookupBucketName(durationKey string) string {
switch durationKey {
case DURATION_KEY_WEEK:
//data stored in the last week
// data stored in the last week
return sr.appConfig.GetString("web.influxdb.bucket")
case DURATION_KEY_MONTH:
// data stored in the last month (after the first week)
@ -455,17 +454,16 @@ func (sr *scrutinyRepository) lookupBucketName(durationKey string) string {
// data stored in the last year (after the first month)
return fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))
case DURATION_KEY_FOREVER:
//data stored before the last year
// data stored before the last year
return fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))
}
return sr.appConfig.GetString("web.influxdb.bucket")
}
func (sr *scrutinyRepository) lookupDuration(durationKey string) []string {
switch durationKey {
case DURATION_KEY_WEEK:
//data stored in the last week
// data stored in the last week
return []string{"-1w", "now()"}
case DURATION_KEY_MONTH:
// data stored in the last month (after the first week)
@ -474,7 +472,7 @@ func (sr *scrutinyRepository) lookupDuration(durationKey string) []string {
// data stored in the last year (after the first month)
return []string{"-1y", "-1mo"}
case DURATION_KEY_FOREVER:
//data stored before the last year
// data stored before the last year
return []string{"-10y", "-1y"}
}
return []string{"-1w", "now()"}
@ -483,16 +481,16 @@ func (sr *scrutinyRepository) lookupDuration(durationKey string) []string {
func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []string {
switch durationKey {
case DURATION_KEY_WEEK:
//all data is stored in a single bucket
// all data is stored in a single bucket
return []string{DURATION_KEY_WEEK}
case DURATION_KEY_MONTH:
//data is stored in the week bucket and the month bucket
// data is stored in the week bucket and the month bucket
return []string{DURATION_KEY_WEEK, DURATION_KEY_MONTH}
case DURATION_KEY_YEAR:
// data stored in the last year (after the first month)
return []string{DURATION_KEY_WEEK, DURATION_KEY_MONTH, DURATION_KEY_YEAR}
case DURATION_KEY_FOREVER:
//data stored before the last year
// data stored before the last year
return []string{DURATION_KEY_WEEK, DURATION_KEY_MONTH, DURATION_KEY_YEAR, DURATION_KEY_FOREVER}
}
return []string{DURATION_KEY_WEEK}

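The lookup helpers above map a duration key onto the bucket that stores that window and onto the Flux range used to query it. Below is a self-contained sketch of the same mapping, assuming a base bucket named "metrics"; the month branch is not visible in the hunks above, so its values are marked as assumptions in the comments.

    package main

    import "fmt"

    // Duration keys, mirroring the DURATION_KEY_* constants used by the repository.
    const (
        durationKeyWeek    = "week"
        durationKeyMonth   = "month"
        durationKeyYear    = "year"
        durationKeyForever = "forever"
    )

    // lookupBucketName maps a duration key to the bucket holding data for that window.
    func lookupBucketName(base, durationKey string) string {
        switch durationKey {
        case durationKeyMonth:
            return base + "_weekly" // assumed: the month window reads the weekly downsample bucket
        case durationKeyYear:
            return base + "_monthly"
        case durationKeyForever:
            return base + "_yearly"
        default: // durationKeyWeek and unknown keys fall back to the raw bucket
            return base
        }
    }

    // lookupDuration maps a duration key to a Flux (start, stop) range pair.
    func lookupDuration(durationKey string) (string, string) {
        switch durationKey {
        case durationKeyMonth:
            return "-1mo", "-1w" // assumed: not visible in the hunk above
        case durationKeyYear:
            return "-1y", "-1mo"
        case durationKeyForever:
            return "-10y", "-1y"
        default:
            return "-1w", "now()"
        }
    }

    func main() {
        for _, key := range []string{durationKeyWeek, durationKeyMonth, durationKeyYear, durationKeyForever} {
            start, stop := lookupDuration(key)
            fmt.Printf("%-8s bucket=%-16s range(start: %s, stop: %s)\n",
                key, lookupBucketName("metrics", key), start, stop)
        }
    }
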
@ -3,18 +3,19 @@ package database
import (
"context"
"fmt"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg"
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
"gorm.io/gorm/clause"
"time"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Device
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//insert device into DB (and update specified columns if device is already registered)
// insert device into DB (and update specified columns if device is already registered)
// update device fields that may change: (DeviceType, HostID)
func (sr *scrutinyRepository) RegisterDevice(ctx context.Context, dev models.Device) error {
if err := sr.gormClient.WithContext(ctx).Clauses(clause.OnConflict{
@ -28,7 +29,7 @@ func (sr *scrutinyRepository) RegisterDevice(ctx context.Context, dev models.Dev
// get a list of all devices (only device metadata, no SMART data)
func (sr *scrutinyRepository) GetDevices(ctx context.Context) ([]models.Device, error) {
//Get a list of all the active devices.
// Get a list of all the active devices.
devices := []models.Device{}
if err := sr.gormClient.WithContext(ctx).Find(&devices).Error; err != nil {
return nil, fmt.Errorf("Could not get device summary from DB: %v", err)
@ -43,7 +44,7 @@ func (sr *scrutinyRepository) UpdateDevice(ctx context.Context, wwn string, coll
return device, fmt.Errorf("Could not get device from DB: %v", err)
}
//TODO catch GormClient err
// TODO catch GormClient err
err := device.UpdateFromCollectorSmartInfo(collectorSmartData)
if err != nil {
return device, err
@ -51,7 +52,7 @@ func (sr *scrutinyRepository) UpdateDevice(ctx context.Context, wwn string, coll
return device, sr.gormClient.Model(&device).Updates(device).Error
}
//Update Device Status
// Update Device Status
func (sr *scrutinyRepository) UpdateDeviceStatus(ctx context.Context, wwn string, status pkg.DeviceStatus) (models.Device, error) {
var device models.Device
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
@ -79,7 +80,7 @@ func (sr *scrutinyRepository) DeleteDevice(ctx context.Context, wwn string) erro
return err
}
//delete data from influxdb.
// delete data from influxdb.
buckets := []string{
sr.appConfig.GetString("web.influxdb.bucket"),
fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket")),

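RegisterDevice upserts through GORM's ON CONFLICT clause: insert the device, or update the fields that may legitimately change when the same WWN is registered again. A sketch of that clause against an in-memory SQLite database, using a hypothetical trimmed-down Device model; the conflict column and update list here are assumptions, not the project's schema.

    package main

    import (
        "fmt"

        "github.com/glebarez/sqlite"
        "gorm.io/gorm"
        "gorm.io/gorm/clause"
    )

    // A trimmed-down device model; WWN is the natural key the upsert conflicts on.
    type Device struct {
        WWN        string `gorm:"column:wwn;primaryKey"`
        HostID     string
        DeviceType string
    }

    func main() {
        db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
        if err != nil {
            panic(err)
        }
        if err := db.AutoMigrate(&Device{}); err != nil {
            panic(err)
        }

        upsert := func(dev Device) error {
            // insert the device, or update the columns that may change when the
            // same WWN is registered again (the OnConflict pattern shown above)
            return db.Clauses(clause.OnConflict{
                Columns:   []clause.Column{{Name: "wwn"}},
                DoUpdates: clause.AssignmentColumns([]string{"host_id", "device_type"}),
            }).Create(&dev).Error
        }

        if err := upsert(Device{WWN: "wwn-1", HostID: "host-a", DeviceType: "sat"}); err != nil {
            panic(err)
        }
        if err := upsert(Device{WWN: "wwn-1", HostID: "host-b", DeviceType: "sat"}); err != nil {
            panic(err)
        }

        var dev Device
        if err := db.First(&dev, "wwn = ?", "wwn-1").Error; err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", dev) // HostID is now host-b; no duplicate row was created
    }
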
@ -13,9 +13,9 @@ import (
log "github.com/sirupsen/logrus"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// SMART
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error) {
deviceSmartData := measurements.Smart{}
err := deviceSmartData.FromCollectorSmartInfo(wwn, collectorSmartData)
@ -34,13 +34,13 @@ func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, wwn strin
// When selectEntries is > 0, only the most recent selectEntries database entries are returned, starting from the selectEntriesOffset entry.
// For example, with selectEntries = 5, selectEntriesOffset = 0, the most recent 5 are returned. With selectEntries = 3, selectEntriesOffset = 2, entries
// 2 to 4 are returned (2 being the third newest, since it is zero-indexed)
func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error) {
func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn, durationKey string, selectEntries, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error) {
// Get SMartResults from InfluxDB
//TODO: change the filter startrange to a real number.
// TODO: change the filter startrange to a real number.
// Get parser flux query result
//appConfig.GetString("web.influxdb.bucket")
// appConfig.GetString("web.influxdb.bucket")
queryStr := sr.aggregateSmartAttributesQuery(wwn, durationKey, selectEntries, selectEntriesOffset, attributes)
log.Infoln(queryStr)
@ -52,7 +52,7 @@ func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn
for result.Next() {
// Observe when there is new grouping key producing new table
if result.TableChanged() {
//fmt.Printf("table: %s\n", result.TableMetadata().String())
// fmt.Printf("table: %s\n", result.TableMetadata().String())
}
smartData, err := measurements.NewSmartFromInfluxDB(result.Record().Values())
@ -82,7 +82,6 @@ func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn
// c.JSON(http.StatusInternalServerError, gin.H{"success": false})
// return
//}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -90,7 +89,7 @@ func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) saveDatapoint(influxWriteApi api.WriteAPIBlocking, measurement string, tags map[string]string, fields map[string]interface{}, date time.Time, ctx context.Context) error {
//sr.logger.Debugf("Storing datapoint in measurement '%s'. tags: %d fields: %d", measurement, len(tags), len(fields))
// sr.logger.Debugf("Storing datapoint in measurement '%s'. tags: %d fields: %d", measurement, len(tags), len(fields))
p := influxdb2.NewPoint(measurement,
tags,
fields,
@ -100,8 +99,7 @@ func (sr *scrutinyRepository) saveDatapoint(influxWriteApi api.WriteAPIBlocking,
return influxWriteApi.WritePoint(ctx, p)
}
func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) string {
func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn, durationKey string, selectEntries, selectEntriesOffset int, attributes []string) string {
/*
import "influxdata/influxdb/schema"
@ -148,7 +146,7 @@ func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, duration
nestedDurationKeys := sr.lookupNestedDurationKeys(durationKey)
if len(nestedDurationKeys) == 1 {
//there's only one bucket being queried, no need to union, just aggregate the dataset and return
// there's only one bucket being queried, no need to union, just aggregate the dataset and return
partialQueryStr = append(partialQueryStr, []string{
sr.generateSmartAttributesSubquery(wwn, nestedDurationKeys[0], selectEntries, selectEntriesOffset, attributes),
fmt.Sprintf(`%sData`, nestedDurationKeys[0]),
@ -184,7 +182,7 @@ func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, duration
return strings.Join(partialQueryStr, "\n")
}
func (sr *scrutinyRepository) generateSmartAttributesSubquery(wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) string {
func (sr *scrutinyRepository) generateSmartAttributesSubquery(wwn, durationKey string, selectEntries, selectEntriesOffset int, attributes []string) string {
bucketName := sr.lookupBucketName(durationKey)
durationRange := sr.lookupDuration(durationKey)

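aggregateSmartAttributesQuery expands the requested window into nested duration keys and only unions the per-bucket datasets when more than one bucket is involved, as the comment above notes. A minimal sketch of that decision; the bucket names, filter, and WWN in the generated text are made-up placeholders rather than the real Flux pipeline.

    package main

    import (
        "fmt"
        "strings"
    )

    // nestedDurationKeys expands a duration key into every bucket window that must
    // be queried to cover it, mirroring the lookupNestedDurationKeys switch above.
    func nestedDurationKeys(durationKey string) []string {
        switch durationKey {
        case "month":
            return []string{"week", "month"}
        case "year":
            return []string{"week", "month", "year"}
        case "forever":
            return []string{"week", "month", "year", "forever"}
        default:
            return []string{"week"}
        }
    }

    // buildQuery sketches the single-bucket vs. union decision: with one window the
    // subquery is used directly, with several the per-window datasets are unioned.
    func buildQuery(wwn, durationKey string) string {
        keys := nestedDurationKeys(durationKey)

        parts := []string{}
        names := []string{}
        for _, key := range keys {
            name := key + "Data"
            names = append(names, name)
            // Placeholder subquery; the real code generates a full Flux pipeline per bucket.
            parts = append(parts, fmt.Sprintf(`%s = from(bucket: "metrics_%s") |> filter(fn: (r) => r["device_wwn"] == "%s")`, name, key, wwn))
        }

        if len(keys) == 1 {
            // only one bucket being queried: no need to union, just return that dataset
            parts = append(parts, names[0])
        } else {
            parts = append(parts, fmt.Sprintf("union(tables: [%s])", strings.Join(names, ", ")))
        }
        return strings.Join(parts, "\n")
    }

    func main() {
        fmt.Println(buildQuery("0x5000cca264eb01d7", "year")) // hypothetical WWN
    }
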
@ -28,7 +28,6 @@ import (
//database.AutoMigrate(&models.Device{})
func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
sr.logger.Infoln("Database migration starting. Please wait, this process may take a long time....")
gormMigrateOptions := gormigrate.DefaultOptions
@ -58,18 +57,18 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
return err
}
//add columns to the Device schema, so we can start adding data to the database & influxdb
err = tx.Migrator().AddColumn(&models.Device{}, "Label") //Label string `json:"label"`
// add columns to the Device schema, so we can start adding data to the database & influxdb
err = tx.Migrator().AddColumn(&models.Device{}, "Label") // Label string `json:"label"`
if err != nil {
return err
}
err = tx.Migrator().AddColumn(&models.Device{}, "DeviceStatus") //DeviceStatus pkg.DeviceStatus `json:"device_status"`
err = tx.Migrator().AddColumn(&models.Device{}, "DeviceStatus") // DeviceStatus pkg.DeviceStatus `json:"device_status"`
if err != nil {
return err
}
//TODO: migrate the data from GORM to influxdb.
//get a list of all devices:
// TODO: migrate the data from GORM to influxdb.
// get a list of all devices:
// get a list of all smart scans in the last 2 weeks:
// get a list of associated smart attribute data:
// translate to a measurements.Smart{} object
@ -81,31 +80,31 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
// get a list of all smart scans:
// do same as above (select 1 scan per year)
preDevices := []m20201107210306.Device{} //pre-migration device information
preDevices := []m20201107210306.Device{} // pre-migration device information
if err = tx.Preload("SmartResults", func(db *gorm.DB) *gorm.DB {
return db.Order("smarts.created_at ASC") //OLD: .Limit(devicesCount)
return db.Order("smarts.created_at ASC") // OLD: .Limit(devicesCount)
}).Find(&preDevices).Error; err != nil {
sr.logger.Errorln("Could not get device summary from DB", err)
return err
}
//calculate bucket oldest dates
// calculate bucket oldest dates
today := time.Now()
dailyBucketMax := today.Add(-RETENTION_PERIOD_15_DAYS_IN_SECONDS * time.Second) //15 days
weeklyBucketMax := today.Add(-RETENTION_PERIOD_9_WEEKS_IN_SECONDS * time.Second) //9 weeks
monthlyBucketMax := today.Add(-RETENTION_PERIOD_25_MONTHS_IN_SECONDS * time.Second) //25 months
dailyBucketMax := today.Add(-RETENTION_PERIOD_15_DAYS_IN_SECONDS * time.Second) // 15 days
weeklyBucketMax := today.Add(-RETENTION_PERIOD_9_WEEKS_IN_SECONDS * time.Second) // 9 weeks
monthlyBucketMax := today.Add(-RETENTION_PERIOD_25_MONTHS_IN_SECONDS * time.Second) // 25 months
for _, preDevice := range preDevices {
sr.logger.Debugf("====================================")
sr.logger.Infof("begin processing device: %s", preDevice.WWN)
//weekly, monthly, yearly lookup storage, so we don't add more data to the buckets than necessary.
// weekly, monthly, yearly lookup storage, so we don't add more data to the buckets than necessary.
weeklyLookup := map[string]bool{}
monthlyLookup := map[string]bool{}
yearlyLookup := map[string]bool{}
for _, preSmartResult := range preDevice.SmartResults { //pre-migration smart results
for _, preSmartResult := range preDevice.SmartResults { // pre-migration smart results
//we're looping in ASC mode, so from oldest entry to most current.
// we're looping in ASC mode, so from oldest entry to most current.
err, postSmartResults := m20201107210306_FromPreInfluxDBSmartResultsCreatePostInfluxDBSmartResults(tx, preDevice, preSmartResult)
if err != nil {
@ -127,7 +126,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
yearMonthStr := fmt.Sprintf("%d-%d", year, month)
yearWeekStr := fmt.Sprintf("%d-%d", year, week)
//write data to daily bucket if in the last 15 days
// write data to daily bucket if in the last 15 days
if postSmartResults.Date.After(dailyBucketMax) {
sr.logger.Debugf("device (%s) smart data added to bucket: daily", preDevice.WWN)
// write point immediately
@ -152,11 +151,11 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
}
}
//write data to the weekly bucket if in the last 9 weeks, and week has not been processed yet
// write data to the weekly bucket if in the last 9 weeks, and week has not been processed yet
if _, weekExists := weeklyLookup[yearWeekStr]; !weekExists && postSmartResults.Date.After(weeklyBucketMax) {
sr.logger.Debugf("device (%s) smart data added to bucket: weekly", preDevice.WWN)
//this week/year pair has not been processed
// this week/year pair has not been processed
weeklyLookup[yearWeekStr] = true
// write point immediately
err = sr.saveDatapoint(
@ -181,11 +180,11 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
}
}
//write data to the monthly bucket if in the last 25 months, and month has not been processed yet
// write data to the monthly bucket if in the last 25 months, and month has not been processed yet
if _, monthExists := monthlyLookup[yearMonthStr]; !monthExists && postSmartResults.Date.After(monthlyBucketMax) {
sr.logger.Debugf("device (%s) smart data added to bucket: monthly", preDevice.WWN)
//this month/year pair has not been processed
// this month/year pair has not been processed
monthlyLookup[yearMonthStr] = true
// write point immediately
err = sr.saveDatapoint(
@ -212,7 +211,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
if _, yearExists := yearlyLookup[yearStr]; !yearExists && year != today.Year() {
sr.logger.Debugf("device (%s) smart data added to bucket: yearly", preDevice.WWN)
//this year has not been processed
// this year has not been processed
yearlyLookup[yearStr] = true
// write point immediately
err = sr.saveDatapoint(
@ -257,15 +256,14 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
return err
}
//migrate the device database
// migrate the device database
return tx.AutoMigrate(m20220503120000.Device{})
},
},
{
ID: "m20220509170100", // addl udev device data
Migrate: func(tx *gorm.DB) error {
//migrate the device database.
// migrate the device database.
// adding addl columns (device_label, device_uuid, device_serial_id)
return tx.AutoMigrate(m20220509170100.Device{})
},
@ -273,7 +271,6 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
{
ID: "m20220709181300",
Migrate: func(tx *gorm.DB) error {
// delete devices with empty `wwn` field (they are impossible to delete manually), and are invalid.
return tx.Where("wwn = ?", "").Delete(&models.Device{}).Error
},
@ -281,15 +278,14 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
{
ID: "m20220716214900", // add settings table.
Migrate: func(tx *gorm.DB) error {
// adding the settings table.
err := tx.AutoMigrate(m20220716214900.Setting{})
if err != nil {
return err
}
//add defaults.
// add defaults.
var defaultSettings = []m20220716214900.Setting{
defaultSettings := []m20220716214900.Setting{
{
SettingKeyName: "theme",
SettingKeyDescription: "Frontend theme ('light' | 'dark' | 'system')",
@ -358,8 +354,8 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
{
ID: "m20221115214900", // add line_stroke setting.
Migrate: func(tx *gorm.DB) error {
//add line_stroke setting default.
var defaultSettings = []m20220716214900.Setting{
// add line_stroke setting default.
defaultSettings := []m20220716214900.Setting{
{
SettingKeyName: "line_stroke",
SettingKeyDescription: "Temperature chart line stroke ('smooth' | 'straight' | 'stepline')",
@ -373,8 +369,8 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
{
ID: "m20231123123300", // add repeat_notifications setting.
Migrate: func(tx *gorm.DB) error {
//add repeat_notifications setting default.
var defaultSettings = []m20220716214900.Setting{
// add repeat_notifications setting default.
defaultSettings := []m20220716214900.Setting{
{
SettingKeyName: "metrics.repeat_notifications",
SettingKeyDescription: "Whether to repeat all notifications or just when values change (true | false)",
@ -393,7 +389,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
}
sr.logger.Infoln("Database migration completed successfully")
//these migrations cannot be done within a transaction, so they are done as a separate group, with `UseTransaction = false`
// these migrations cannot be done within a transaction, so they are done as a separate group, with `UseTransaction = false`
sr.logger.Infoln("SQLite global configuration migrations starting. Please wait....")
globalMigrateOptions := gormigrate.DefaultOptions
globalMigrateOptions.UseTransaction = false
@ -401,7 +397,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
{
ID: "g20220802211500",
Migrate: func(tx *gorm.DB) error {
//shrink the Database (maybe necessary after 20220503113100)
// shrink the Database (maybe necessary after 20220503113100)
if err := tx.Exec("VACUUM;").Error; err != nil {
return err
}
@ -421,8 +417,8 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
// helpers
//When adding data to influxdb, an error may be returned if the data point is outside the range of the retention policy.
//This function will ignore retention policy errors, and allow the migration to continue.
// When adding data to influxdb, an error may be returned if the data point is outside the range of the retention policy.
// This function will ignore retention policy errors, and allow the migration to continue.
func ignorePastRetentionPolicyError(err error) error {
var influxDbWriteError *http.Error
if errors.As(err, &influxDbWriteError) {
@ -436,7 +432,7 @@ func ignorePastRetentionPolicyError(err error) error {
// Deprecated
func m20201107210306_FromPreInfluxDBTempCreatePostInfluxDBTemp(preDevice m20201107210306.Device, preSmartResult m20201107210306.Smart) (error, measurements.SmartTemperature) {
//extract temperature data for every datapoint
// extract temperature data for every datapoint
postSmartTemp := measurements.SmartTemperature{
Date: preSmartResult.TestDate,
Temp: preSmartResult.Temp,
@ -447,7 +443,7 @@ func m20201107210306_FromPreInfluxDBTempCreatePostInfluxDBTemp(preDevice m202011
// Deprecated
func m20201107210306_FromPreInfluxDBSmartResultsCreatePostInfluxDBSmartResults(database *gorm.DB, preDevice m20201107210306.Device, preSmartResult m20201107210306.Smart) (error, measurements.Smart) {
//create a measurements.Smart object (which we will then push to the InfluxDB)
// create a measurements.Smart object (which we will then push to the InfluxDB)
postDeviceSmartData := measurements.Smart{
Date: preSmartResult.TestDate,
DeviceWWN: preDevice.WWN,
@ -508,7 +504,7 @@ func m20201107210306_FromPreInfluxDBSmartResultsCreatePostInfluxDBSmartResults(d
postDeviceSmartData.ProcessAtaSmartInfo(preAtaSmartAttributesTable)
} else if preDevice.IsNvme() {
//info collector.SmartInfo
// info collector.SmartInfo
postNvmeSmartHealthInformation := collector.NvmeSmartHealthInformationLog{}
for _, preNvmeAttribute := range preSmartResult.NvmeAttributes {
@ -553,7 +549,7 @@ func m20201107210306_FromPreInfluxDBSmartResultsCreatePostInfluxDBSmartResults(d
postDeviceSmartData.ProcessNvmeSmartInfo(postNvmeSmartHealthInformation)
} else if preDevice.IsScsi() {
//info collector.SmartInfo
// info collector.SmartInfo
var postScsiGrownDefectList int64
postScsiErrorCounterLog := collector.ScsiErrorCounterLog{
Read: struct {

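The migration back-fills InfluxDB by computing one cutoff per bucket and deduplicating weekly and monthly writes with year-week and year-month lookup maps. A runnable sketch of that bucketing, with illustrative cutoffs (15 days, 9 weeks, 25 months) and hypothetical sample dates.

    package main

    import (
        "fmt"
        "time"
    )

    // Retention cutoffs corresponding to the RETENTION_PERIOD_* constants used above
    // (values here are illustrative: 15 days, 9 weeks, 25 months).
    func bucketCutoffs(now time.Time) (daily, weekly, monthly time.Time) {
        daily = now.AddDate(0, 0, -15)
        weekly = now.AddDate(0, 0, -9*7)
        monthly = now.AddDate(0, -25, 0)
        return
    }

    func main() {
        now := time.Now()
        daily, weekly, monthly := bucketCutoffs(now)

        // Hypothetical dates standing in for pre-migration SMART results.
        samples := []time.Time{
            now.AddDate(0, 0, -3),                 // recent enough for every bucket
            now.AddDate(0, 0, -20),                // past the 15 day daily cutoff
            now.AddDate(0, 0, -20).Add(time.Hour), // typically the same ISO week, so the weekly write is skipped
            now.AddDate(0, -6, 0),                 // only the monthly bucket still covers this
        }

        // Dedupe maps so each week/month only receives one aggregated point,
        // mirroring weeklyLookup / monthlyLookup in the migration.
        weeklySeen := map[string]bool{}
        monthlySeen := map[string]bool{}

        for _, date := range samples {
            if date.After(daily) {
                fmt.Println(date.Format("2006-01-02"), "-> daily bucket")
            }

            year, week := date.ISOWeek()
            weekKey := fmt.Sprintf("%d-%d", year, week)
            if !weeklySeen[weekKey] && date.After(weekly) {
                weeklySeen[weekKey] = true
                fmt.Println(date.Format("2006-01-02"), "-> weekly bucket")
            }

            monthKey := fmt.Sprintf("%d-%d", date.Year(), int(date.Month()))
            if !monthlySeen[monthKey] && date.After(monthly) {
                monthlySeen[monthKey] = true
                fmt.Println(date.Format("2006-01-02"), "-> monthly bucket")
            }
        }
    }
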
@ -3,10 +3,11 @@ package database
import (
"context"
"fmt"
"strings"
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
"github.com/mitchellh/mapstructure"
"strings"
)
// LoadSettings will retrieve settings from the database, store them in the AppConfig object, and return a Settings struct
@ -42,7 +43,7 @@ func (sr *scrutinyRepository) LoadSettings(ctx context.Context) (*models.Setting
// curl -d '{"metrics": { "notify_level": 5, "status_filter_attributes": 5, "status_threshold": 5 }}' -H "Content-Type: application/json" -X POST http://localhost:9090/api/settings
// SaveSettings will update settings in AppConfig object, then save the settings to the database.
func (sr *scrutinyRepository) SaveSettings(ctx context.Context, settings models.Settings) error {
//save the entries to the appconfig
// save the entries to the appconfig
settingsMap := &map[string]interface{}{}
err := mapstructure.Decode(settings, &settingsMap)
if err != nil {
@ -55,13 +56,13 @@ func (sr *scrutinyRepository) SaveSettings(ctx context.Context, settings models.
return err
}
sr.logger.Debugf("after merge settings: %v", sr.appConfig.AllSettings())
//retrieve current settings from the database
// retrieve current settings from the database
settingsEntries := []models.SettingEntry{}
if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
return fmt.Errorf("Could not get settings from DB: %v", err)
}
//update settingsEntries
// update settingsEntries
for ndx, settingsEntry := range settingsEntries {
configKey := fmt.Sprintf("%s.%s", config.DB_USER_SETTINGS_SUBKEY, strings.ToLower(settingsEntry.SettingKeyName))
@ -74,7 +75,7 @@ func (sr *scrutinyRepository) SaveSettings(ctx context.Context, settings models.
}
// store in database.
//TODO: this should be `sr.gormClient.Updates(&settingsEntries).Error`
// TODO: this should be `sr.gormClient.Updates(&settingsEntries).Error`
err := sr.gormClient.Model(&models.SettingEntry{}).Where([]uint{settingsEntry.ID}).Select("setting_value_numeric", "setting_value_string", "setting_value_bool").Updates(settingsEntries[ndx]).Error
if err != nil {
return err

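SaveSettings first decodes the settings struct into a generic map with mapstructure before merging it into the app config and updating the stored rows. A small sketch of that decode step, using a trimmed, hypothetical Settings struct limited to the theme and line_stroke keys that appear in the migrations above.

    package main

    import (
        "fmt"
        "log"

        "github.com/mitchellh/mapstructure"
    )

    // A trimmed, hypothetical settings struct; the real models.Settings carries many
    // more fields, but the tag-driven decode works the same way.
    type Settings struct {
        Theme      string `mapstructure:"theme"`
        LineStroke string `mapstructure:"line_stroke"`
    }

    func main() {
        settings := Settings{Theme: "dark", LineStroke: "smooth"}

        // Decode the struct into a generic map, the shape SaveSettings merges into
        // the application config before updating the individual setting rows.
        settingsMap := map[string]interface{}{}
        if err := mapstructure.Decode(settings, &settingsMap); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%#v\n", settingsMap)
    }
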
@ -3,23 +3,24 @@ package database
import (
"context"
"fmt"
"github.com/influxdata/influxdb-client-go/v2/api"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tasks
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) error {
weeklyTaskName := "tsk-weekly-aggr"
weeklyTaskScript := sr.DownsampleScript("weekly", weeklyTaskName, "0 1 * * 0")
if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: weeklyTaskName}); findErr == nil && len(found) == 0 {
//weekly on Sunday at 1:00am
// weekly on Sunday at 1:00am
_, err := sr.influxTaskApi.CreateTaskByFlux(ctx, weeklyTaskScript, orgID)
if err != nil {
return err
}
} else if len(found) == 1 {
//check if we should update
// check if we should update
task := &found[0]
if weeklyTaskScript != task.Flux {
sr.logger.Infoln("updating weekly task script")
@ -34,13 +35,13 @@ func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) err
monthlyTaskName := "tsk-monthly-aggr"
monthlyTaskScript := sr.DownsampleScript("monthly", monthlyTaskName, "30 1 1 * *")
if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: monthlyTaskName}); findErr == nil && len(found) == 0 {
//monthly on first day of the month at 1:30am
// monthly on first day of the month at 1:30am
_, err := sr.influxTaskApi.CreateTaskByFlux(ctx, monthlyTaskScript, orgID)
if err != nil {
return err
}
} else if len(found) == 1 {
//check if we should update
// check if we should update
task := &found[0]
if monthlyTaskScript != task.Flux {
sr.logger.Infoln("updating monthly task script")
@ -55,13 +56,13 @@ func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) err
yearlyTaskName := "tsk-yearly-aggr"
yearlyTaskScript := sr.DownsampleScript("yearly", yearlyTaskName, "0 2 1 1 *")
if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: yearlyTaskName}); findErr == nil && len(found) == 0 {
//yearly on the first day of the year at 2:00am
// yearly on the first day of the year at 2:00am
_, err := sr.influxTaskApi.CreateTaskByFlux(ctx, yearlyTaskScript, orgID)
if err != nil {
return err
}
} else if len(found) == 1 {
//check if we should update
// check if we should update
task := &found[0]
if yearlyTaskScript != task.Flux {
sr.logger.Infoln("updating yearly task script")
@ -75,7 +76,7 @@ func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) err
return nil
}
func (sr *scrutinyRepository) DownsampleScript(aggregationType string, name string, cron string) string {
func (sr *scrutinyRepository) DownsampleScript(aggregationType, name, cron string) string {
var sourceBucket string // the source of the data
var destBucket string // the destination for the aggregated data
var rangeStart string

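EnsureTasks follows a create-if-missing, update-if-the-script-changed pattern for the three downsampling tasks. A sketch of that reconcile loop against an in-memory store standing in for the InfluxDB tasks API; only the task names and cron schedules are taken from the code above, and the generated script text is a placeholder.

    package main

    import "fmt"

    // A stand-in for the task store; the real code goes through the InfluxDB tasks API.
    type taskStore struct {
        scripts map[string]string // task name -> Flux script
    }

    // ensureTask creates the task when it is missing and rewrites it when the stored
    // script no longer matches the desired one, mirroring the EnsureTasks flow above.
    func (s *taskStore) ensureTask(name, desiredScript string) string {
        current, found := s.scripts[name]
        switch {
        case !found:
            s.scripts[name] = desiredScript
            return "created"
        case current != desiredScript:
            s.scripts[name] = desiredScript
            return "updated"
        default:
            return "unchanged"
        }
    }

    func main() {
        store := &taskStore{scripts: map[string]string{}}

        // Cron schedules copied from the repository: weekly on Sunday 1:00am,
        // monthly on the 1st at 1:30am, yearly on Jan 1st at 2:00am.
        tasks := map[string]string{
            "tsk-weekly-aggr":  "0 1 * * 0",
            "tsk-monthly-aggr": "30 1 1 * *",
            "tsk-yearly-aggr":  "0 2 1 1 *",
        }
        for name, cron := range tasks {
            script := fmt.Sprintf("option task = {name: %q, cron: %q}", name, cron)
            fmt.Println(name, "->", store.ensureTask(name, script))
        }
    }
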
@ -1,16 +1,17 @@
package database
import (
"testing"
mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"testing"
)
func Test_DownsampleScript_Weekly(t *testing.T) {
t.Parallel()
//setup
// setup
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeConfig := mock_config.NewMockInterface(mockCtrl)
@ -23,10 +24,10 @@ func Test_DownsampleScript_Weekly(t *testing.T) {
aggregationType := "weekly"
//test
// test
influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-weekly-aggr", "0 1 * * 0")
//assert
// assert
require.Equal(t, `
option task = {
name: "tsk-weekly-aggr",
@ -62,7 +63,7 @@ from(bucket: sourceBucket)
func Test_DownsampleScript_Monthly(t *testing.T) {
t.Parallel()
//setup
// setup
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeConfig := mock_config.NewMockInterface(mockCtrl)
@ -75,10 +76,10 @@ func Test_DownsampleScript_Monthly(t *testing.T) {
aggregationType := "monthly"
//test
// test
influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-monthly-aggr", "30 1 1 * *")
//assert
// assert
require.Equal(t, `
option task = {
name: "tsk-monthly-aggr",
@ -114,7 +115,7 @@ from(bucket: sourceBucket)
func Test_DownsampleScript_Yearly(t *testing.T) {
t.Parallel()
//setup
// setup
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeConfig := mock_config.NewMockInterface(mockCtrl)
@ -127,10 +128,10 @@ func Test_DownsampleScript_Yearly(t *testing.T) {
aggregationType := "yearly"
//test
// test
influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-yearly-aggr", "0 2 1 1 *")
//assert
// assert
require.Equal(t, `
option task = {
name: "tsk-yearly-aggr",

@ -3,21 +3,21 @@ package database
import (
"context"
"fmt"
"strings"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
"strings"
"time"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Temperature Data
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn string, deviceProtocol string, collectorSmartData collector.SmartInfo) error {
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn, deviceProtocol string, collectorSmartData collector.SmartInfo) error {
if len(collectorSmartData.AtaSctTemperatureHistory.Table) > 0 {
for ndx, temp := range collectorSmartData.AtaSctTemperatureHistory.Table {
//temp value may be null, we must skip/ignore them. See #393
// temp value may be null, we must skip/ignore them. See #393
if temp == 0 {
continue
}
@ -59,21 +59,20 @@ func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn stri
}
func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error) {
//we can get temp history for "week", "month", "year", "forever"
// we can get temp history for "week", "month", "year", "forever"
deviceTempHistory := map[string][]measurements.SmartTemperature{}
//TODO: change the query range to a variable.
// TODO: change the query range to a variable.
queryStr := sr.aggregateTempQuery(durationKey)
result, err := sr.influxQueryApi.Query(ctx, queryStr)
if err == nil {
// Use Next() to iterate over query result lines
for result.Next() {
if deviceWWN, ok := result.Record().Values()["device_wwn"]; ok {
//check if deviceWWN has been seen and initialized already
// check if deviceWWN has been seen and initialized already
if _, ok := deviceTempHistory[deviceWWN.(string)]; !ok {
deviceTempHistory[deviceWWN.(string)] = []measurements.SmartTemperature{}
}
@ -96,7 +95,6 @@ func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, du
return nil, err
}
return deviceTempHistory, nil
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -104,7 +102,6 @@ func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, du
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
/*
import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
@ -152,7 +149,7 @@ func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
}
if len(subQueryNames) == 1 {
//there's only one bucket being queried, no need to union, just aggregate the dataset and return
// there's only one bucket being queried, no need to union, just aggregate the dataset and return
partialQueryStr = append(partialQueryStr, []string{
subQueryNames[0],
"|> schema.fieldsAsCols()",

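GetSmartTemperatureHistory groups temperature points per device WWN, initializing the slice the first time a WWN is seen, while SaveSmartTemperature skips null (zero) readings. A small sketch of both behaviors with hypothetical WWNs and readings.

    package main

    import (
        "fmt"
        "time"
    )

    // A minimal temperature sample; the real code uses measurements.SmartTemperature.
    type tempSample struct {
        Date time.Time
        Temp int64
    }

    func main() {
        // Raw readings for two hypothetical drives; a zero temperature stands in
        // for the null values that SaveSmartTemperature skips (see #393 above).
        raw := map[string][]int64{
            "wwn-a": {34, 0, 36},
            "wwn-b": {41},
        }

        history := map[string][]tempSample{}
        now := time.Now()
        for wwn, temps := range raw {
            for i, temp := range temps {
                if temp == 0 {
                    continue // temp value may be null, so skip it
                }
                // initialize the slice the first time this WWN is seen,
                // as GetSmartTemperatureHistory does for its result map
                if _, ok := history[wwn]; !ok {
                    history[wwn] = []tempSample{}
                }
                history[wwn] = append(history[wwn], tempSample{
                    Date: now.Add(time.Duration(-i) * time.Hour),
                    Temp: temp,
                })
            }
        }

        for wwn, samples := range history {
            fmt.Println(wwn, "->", len(samples), "samples")
        }
    }
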
@ -1,16 +1,17 @@
package database
import (
"testing"
mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"testing"
)
func Test_aggregateTempQuery_Week(t *testing.T) {
t.Parallel()
//setup
// setup
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeConfig := mock_config.NewMockInterface(mockCtrl)
@ -23,10 +24,10 @@ func Test_aggregateTempQuery_Week(t *testing.T) {
aggregationType := DURATION_KEY_WEEK
//test
// test
influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)
//assert
// assert
require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
@ -43,7 +44,7 @@ weekData
func Test_aggregateTempQuery_Month(t *testing.T) {
t.Parallel()
//setup
// setup
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeConfig := mock_config.NewMockInterface(mockCtrl)
@ -56,10 +57,10 @@ func Test_aggregateTempQuery_Month(t *testing.T) {
aggregationType := DURATION_KEY_MONTH
//test
// test
influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)
//assert
// assert
require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
@ -84,7 +85,7 @@ union(tables: [weekData, monthData])
func Test_aggregateTempQuery_Year(t *testing.T) {
t.Parallel()
//setup
// setup
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeConfig := mock_config.NewMockInterface(mockCtrl)
@ -97,10 +98,10 @@ func Test_aggregateTempQuery_Year(t *testing.T) {
aggregationType := DURATION_KEY_YEAR
//test
// test
influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)
//assert
// assert
require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
@ -132,7 +133,7 @@ union(tables: [weekData, monthData, yearData])
func Test_aggregateTempQuery_Forever(t *testing.T) {
t.Parallel()
//setup
// setup
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeConfig := mock_config.NewMockInterface(mockCtrl)
@ -145,10 +146,10 @@ func Test_aggregateTempQuery_Forever(t *testing.T) {
aggregationType := DURATION_KEY_FOREVER
//test
// test
influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)
//assert
// assert
require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())

@ -1,9 +1,10 @@
package errors_test
import (
"testing"
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
"github.com/stretchr/testify/require"
"testing"
)
//func TestCheckErr_WithoutError(t *testing.T) {
@ -27,7 +28,7 @@ import (
func TestErrors(t *testing.T) {
t.Parallel()
//assert
// assert
require.Implements(t, (*error)(nil), errors.ConfigFileMissingError("test"), "should implement the error interface")
require.Implements(t, (*error)(nil), errors.ConfigValidationError("test"), "should implement the error interface")
require.Implements(t, (*error)(nil), errors.DependencyMissingError("test"), "should implement the error interface")

@ -1,9 +1,10 @@
package models
import (
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
"time"
)
type DeviceWrapper struct {
@ -13,7 +14,7 @@ type DeviceWrapper struct {
}
type Device struct {
//GORM attributes, see: http://gorm.io/docs/conventions.html
// GORM attributes, see: http://gorm.io/docs/conventions.html
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt *time.Time
@ -35,8 +36,8 @@ type Device struct {
Capacity int64 `json:"capacity"`
FormFactor string `json:"form_factor"`
SmartSupport bool `json:"smart_support"`
DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` //device type is used for querying with -d/t flag, should only be used by collector.
DeviceProtocol string `json:"device_protocol"` // protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
DeviceType string `json:"device_type"` // device type is used for querying with -d/t flag, should only be used by collector.
// User provided metadata
Label string `json:"label"`

@ -1,8 +1,9 @@
package models
import (
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
)
type DeviceSummaryWrapper struct {

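The hunk above is a minimal example of the import regrouping applied throughout this diff: standard-library imports form the first group and third-party modules a separate one. A compilable sketch of the resulting layout, using logrus (already imported elsewhere in this diff) as the third-party example.

    package main

    import (
        "fmt"
        "time"

        "github.com/sirupsen/logrus"
    )

    func main() {
        // standard-library imports sit in the first group, third-party modules below
        logrus.Infof("formatted at %s", time.Now().Format(time.RFC3339))
        fmt.Println("imports grouped: stdlib first, then third-party")
    }
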
@ -2,13 +2,14 @@ package measurements
import (
"fmt"
"github.com/analogj/scrutiny/webapp/backend/pkg"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
"log"
"strconv"
"strings"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
)
type Smart struct {
@ -16,15 +17,15 @@ type Smart struct {
DeviceWWN string `json:"device_wwn"` //(tag)
DeviceProtocol string `json:"device_protocol"`
//Metrics (fields)
// Metrics (fields)
Temp int64 `json:"temp"`
PowerOnHours int64 `json:"power_on_hours"`
PowerCycleCount int64 `json:"power_cycle_count"`
//Attributes (fields)
// Attributes (fields)
Attributes map[string]SmartAttribute `json:"attrs"`
//status
// status
Status pkg.DeviceStatus
}
@ -50,10 +51,10 @@ func (sm *Smart) Flatten() (tags map[string]string, fields map[string]interface{
}
func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
//go through the massive map returned from influxdb. If a key is associated with the Smart struct, assign it. If it starts with "attr.*" group it by attributeId, and pass to attribute inflate.
// go through the massive map returned from influxdb. If a key is associated with the Smart struct, assign it. If it starts with "attr.*" group it by attributeId, and pass to attribute inflate.
sm := Smart{
//required fields
// required fields
Date: attrs["_time"].(time.Time),
DeviceWWN: attrs["device_wwn"].(string),
DeviceProtocol: attrs["device_protocol"].(string),
@ -74,7 +75,7 @@ func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
if !strings.HasPrefix(key, "attr.") {
continue
}
//this is an attribute, let's group it with its related "siblings", populating a SmartAttribute object
// this is an attribute, let's group it with its related "siblings", populating a SmartAttribute object
keyParts := strings.Split(key, ".")
attributeId := keyParts[1]
if _, ok := sm.Attributes[attributeId]; !ok {
@ -92,7 +93,6 @@ func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
sm.Attributes[attributeId].Inflate(key, val)
}
}
log.Printf("Found Smart Device (%s) Attributes (%v)", sm.DeviceWWN, len(sm.Attributes))
@ -100,12 +100,12 @@ func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
return &sm, nil
}
//Parse Collector SMART data results and create Smart object (and associated SmartAtaAttribute entries)
// Parse Collector SMART data results and create Smart object (and associated SmartAtaAttribute entries)
func (sm *Smart) FromCollectorSmartInfo(wwn string, info collector.SmartInfo) error {
sm.DeviceWWN = wwn
sm.Date = time.Unix(info.LocalTime.TimeT, 0)
//smart metrics
// smart metrics
sm.Temp = info.Temperature.Current
sm.PowerCycleCount = info.PowerCycleCount
sm.PowerOnHours = info.PowerOnTime.Hours
@ -127,7 +127,7 @@ func (sm *Smart) FromCollectorSmartInfo(wwn string, info collector.SmartInfo) er
return nil
}
//generate SmartAtaAttribute entries from Scrutiny Collector Smart data.
// generate SmartAtaAttribute entries from Scrutiny Collector Smart data.
func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTableItem) {
for _, collectorAttr := range tableItems {
attrModel := SmartAtaAttribute{
@ -140,7 +140,7 @@ func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTa
WhenFailed: collectorAttr.WhenFailed,
}
//now that we've parsed the data from the smartctl response, lets match it against our metadata rules and add additional Scrutiny specific data.
// now that we've parsed the data from the smartctl response, lets match it against our metadata rules and add additional Scrutiny specific data.
if smartMetadata, ok := thresholds.AtaMetadata[collectorAttr.ID]; ok {
if smartMetadata.Transform != nil {
attrModel.TransformedValue = smartMetadata.Transform(attrModel.Value, attrModel.RawValue, attrModel.RawString)
@ -155,9 +155,8 @@ func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTa
}
}
//generate SmartNvmeAttribute entries from Scrutiny Collector Smart data.
// generate SmartNvmeAttribute entries from Scrutiny Collector Smart data.
func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.NvmeSmartHealthInformationLog) {
sm.Attributes = map[string]SmartAttribute{
"critical_warning": (&SmartNvmeAttribute{AttributeId: "critical_warning", Value: nvmeSmartHealthInformationLog.CriticalWarning, Threshold: 0}).PopulateAttributeStatus(),
"temperature": (&SmartNvmeAttribute{AttributeId: "temperature", Value: nvmeSmartHealthInformationLog.Temperature, Threshold: -1}).PopulateAttributeStatus(),
@ -177,7 +176,7 @@ func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.Nv
"critical_comp_time": (&SmartNvmeAttribute{AttributeId: "critical_comp_time", Value: nvmeSmartHealthInformationLog.CriticalCompTime, Threshold: -1}).PopulateAttributeStatus(),
}
//find analyzed attribute status
// find analyzed attribute status
for _, val := range sm.Attributes {
if pkg.AttributeStatusHas(val.GetStatus(), pkg.AttributeStatusFailedScrutiny) {
sm.Status = pkg.DeviceStatusSet(sm.Status, pkg.DeviceStatusFailedScrutiny)
@ -185,7 +184,7 @@ func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.Nv
}
}
//generate SmartScsiAttribute entries from Scrutiny Collector Smart data.
// generate SmartScsiAttribute entries from Scrutiny Collector Smart data.
func (sm *Smart) ProcessScsiSmartInfo(defectGrownList int64, scsiErrorCounterLog collector.ScsiErrorCounterLog) {
sm.Attributes = map[string]SmartAttribute{
"scsi_grown_defect_list": (&SmartScsiAttribute{AttributeId: "scsi_grown_defect_list", Value: defectGrownList, Threshold: 0}).PopulateAttributeStatus(),
@ -203,7 +202,7 @@ func (sm *Smart) ProcessScsiSmartInfo(defectGrownList int64, scsiErrorCounterLog
"write_total_uncorrected_errors": (&SmartScsiAttribute{AttributeId: "write_total_uncorrected_errors", Value: scsiErrorCounterLog.Write.TotalUncorrectedErrors, Threshold: 0}).PopulateAttributeStatus(),
}
//find analyzed attribute status
// find analyzed attribute status
for _, val := range sm.Attributes {
if pkg.AttributeStatusHas(val.GetStatus(), pkg.AttributeStatusFailedScrutiny) {
sm.Status = pkg.DeviceStatusSet(sm.Status, pkg.DeviceStatusFailedScrutiny)

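Smart.Flatten stores every attribute under "attr.<id>.<field>" keys and NewSmartFromInfluxDB rebuilds the attributes by splitting those keys and grouping on the id segment. A cut-down, self-contained sketch of that round trip, with a two-field attribute standing in for the full SmartAttribute types.

    package main

    import (
        "fmt"
        "strings"
    )

    // A cut-down attribute; the real SmartAtaAttribute/SmartNvmeAttribute types
    // carry more generated fields (status, status reason, failure rate, ...).
    type attribute struct {
        ID    string
        Value int64
    }

    // flatten writes each attribute under "attr.<id>.<field>" keys, the shape
    // stored as InfluxDB fields by Smart.Flatten.
    func flatten(attrs []attribute) map[string]interface{} {
        fields := map[string]interface{}{}
        for _, a := range attrs {
            fields[fmt.Sprintf("attr.%s.attribute_id", a.ID)] = a.ID
            fields[fmt.Sprintf("attr.%s.value", a.ID)] = a.Value
        }
        return fields
    }

    // inflate reverses the process: group the flat keys by attribute id (keyParts[1])
    // and rebuild one attribute per group, as NewSmartFromInfluxDB does.
    func inflate(fields map[string]interface{}) map[string]attribute {
        attrs := map[string]attribute{}
        for key, val := range fields {
            if !strings.HasPrefix(key, "attr.") {
                continue
            }
            keyParts := strings.Split(key, ".")
            id, field := keyParts[1], keyParts[2]

            a := attrs[id]
            a.ID = id
            if field == "value" {
                a.Value = val.(int64)
            }
            attrs[id] = a
        }
        return attrs
    }

    func main() {
        fields := flatten([]attribute{{ID: "194", Value: 32}, {ID: "5", Value: 0}})
        fmt.Println(len(fields), "flat fields")
        for id, a := range inflate(fields) {
            fmt.Printf("attr %s -> value %d\n", id, a.Value)
        }
    }
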
@ -18,7 +18,7 @@ type SmartAtaAttribute struct {
RawString string `json:"raw_string"`
WhenFailed string `json:"when_failed"`
//Generated data
// Generated data
TransformedValue int64 `json:"transformed_value"`
Status pkg.AttributeStatus `json:"status"`
StatusReason string `json:"status_reason,omitempty"`
@ -34,7 +34,6 @@ func (sa *SmartAtaAttribute) GetStatus() pkg.AttributeStatus {
}
func (sa *SmartAtaAttribute) Flatten() map[string]interface{} {
idString := strconv.Itoa(sa.AttributeId)
return map[string]interface{}{
@ -46,13 +45,14 @@ func (sa *SmartAtaAttribute) Flatten() map[string]interface{} {
fmt.Sprintf("attr.%s.raw_string", idString): sa.RawString,
fmt.Sprintf("attr.%s.when_failed", idString): sa.WhenFailed,
//Generated Data
// Generated Data
fmt.Sprintf("attr.%s.transformed_value", idString): sa.TransformedValue,
fmt.Sprintf("attr.%s.status", idString): int64(sa.Status),
fmt.Sprintf("attr.%s.status_reason", idString): sa.StatusReason,
fmt.Sprintf("attr.%s.failure_rate", idString): sa.FailureRate,
}
}
func (sa *SmartAtaAttribute) Inflate(key string, val interface{}) {
if val == nil {
return
@ -78,7 +78,7 @@ func (sa *SmartAtaAttribute) Inflate(key string, val interface{}) {
case "when_failed":
sa.WhenFailed = val.(string)
//generated
// generated
case "transformed_value":
sa.TransformedValue = val.(int64)
case "status":
@ -91,14 +91,14 @@ func (sa *SmartAtaAttribute) Inflate(key string, val interface{}) {
}
}
//populate attribute status, using SMART Thresholds & Observed Metadata
// populate attribute status, using SMART Thresholds & Observed Metadata
// Chainable
func (sa *SmartAtaAttribute) PopulateAttributeStatus() *SmartAtaAttribute {
if strings.ToUpper(sa.WhenFailed) == pkg.AttributeWhenFailedFailingNow {
//this attribute has previously failed
// this attribute has previously failed
sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedSmart)
sa.StatusReason += "Attribute is failing manufacturer SMART threshold"
//if the Smart Status is failed, we should exit early, no need to look at thresholds.
// if the Smart Status is failed, we should exit early, no need to look at thresholds.
return sa
} else if strings.ToUpper(sa.WhenFailed) == pkg.AttributeWhenFailedInThePast {
@ -115,7 +115,7 @@ func (sa *SmartAtaAttribute) PopulateAttributeStatus() *SmartAtaAttribute {
// compare the attribute (raw, normalized, transformed) value to observed thresholds, and update status if necessary
func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttributeMetadata) {
//TODO: multiple rules
// TODO: multiple rules
// try to predict the failure rates for observed thresholds that have 0 failure rate and error bars.
// - if the attribute is critical
// - the failure rate is over 10 - set to failed
@ -124,7 +124,7 @@ func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttri
// - if failure rate is above 20 - set to failed
// - if failure rate is above 10 but below 20 - set to warn
//update the smart attribute status based on Observed thresholds.
// update the smart attribute status based on Observed thresholds.
var value int64
if smartMetadata.DisplayType == thresholds.AtaSmartAttributeDisplayTypeNormalized {
value = int64(sa.Value)
@ -135,8 +135,7 @@ func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttri
}
for _, obsThresh := range smartMetadata.ObservedThresholds {
//check if "value" is in this bucket
// check if "value" is in this bucket
if ((obsThresh.Low == obsThresh.High) && value == obsThresh.Low) ||
(obsThresh.Low < value && value <= obsThresh.High) {
sa.FailureRate = obsThresh.AnnualFailureRate
@ -156,7 +155,7 @@ func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttri
}
}
//we've found the correct bucket, we can drop out of this loop
// we've found the correct bucket, we can drop out of this loop
return
}
}

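ValidateThreshold walks the observed-threshold buckets and picks the one whose range contains the attribute value, treating Low == High as an exact match. A sketch of just that bucket lookup, with made-up buckets and failure rates.

    package main

    import "fmt"

    // One observed-threshold bucket, shaped like the entries in thresholds.AtaAttributeMetadata.
    type observedThreshold struct {
        Low, High         int64
        AnnualFailureRate float64
    }

    // failureRateFor returns the annual failure rate of the bucket containing value,
    // using the matching rule from ValidateThreshold: an exact match when the bucket
    // is a single point (Low == High), otherwise Low < value <= High.
    func failureRateFor(value int64, buckets []observedThreshold) (float64, bool) {
        for _, b := range buckets {
            if (b.Low == b.High && value == b.Low) || (b.Low < value && value <= b.High) {
                return b.AnnualFailureRate, true
            }
        }
        return 0, false
    }

    func main() {
        // Hypothetical buckets; real values come from the metadata bundled in
        // Scrutiny's thresholds package.
        buckets := []observedThreshold{
            {Low: 0, High: 0, AnnualFailureRate: 0.02},
            {Low: 0, High: 8, AnnualFailureRate: 0.09},
            {Low: 8, High: 1024, AnnualFailureRate: 0.23},
        }
        for _, raw := range []int64{0, 4, 500} {
            if rate, ok := failureRateFor(raw, buckets); ok {
                fmt.Printf("raw value %d -> annual failure rate %.2f\n", raw, rate)
            }
        }
    }
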
@ -9,7 +9,7 @@ import (
)
type SmartNvmeAttribute struct {
AttributeId string `json:"attribute_id"` //json string from smartctl
AttributeId string `json:"attribute_id"` // json string from smartctl
Value int64 `json:"value"`
Threshold int64 `json:"thresh"`
@ -33,13 +33,14 @@ func (sa *SmartNvmeAttribute) Flatten() map[string]interface{} {
fmt.Sprintf("attr.%s.value", sa.AttributeId): sa.Value,
fmt.Sprintf("attr.%s.thresh", sa.AttributeId): sa.Threshold,
//Generated Data
// Generated Data
fmt.Sprintf("attr.%s.transformed_value", sa.AttributeId): sa.TransformedValue,
fmt.Sprintf("attr.%s.status", sa.AttributeId): int64(sa.Status),
fmt.Sprintf("attr.%s.status_reason", sa.AttributeId): sa.StatusReason,
fmt.Sprintf("attr.%s.failure_rate", sa.AttributeId): sa.FailureRate,
}
}
func (sa *SmartNvmeAttribute) Inflate(key string, val interface{}) {
if val == nil {
return
@ -55,7 +56,7 @@ func (sa *SmartNvmeAttribute) Inflate(key string, val interface{}) {
case "thresh":
sa.Threshold = val.(int64)
//generated
// generated
case "transformed_value":
sa.TransformedValue = val.(int64)
case "status":
@ -67,14 +68,13 @@ func (sa *SmartNvmeAttribute) Inflate(key string, val interface{}) {
}
}
//populate attribute status, using SMART Thresholds & Observed Metadata
// populate attribute status, using SMART Thresholds & Observed Metadata
// Chainable
func (sa *SmartNvmeAttribute) PopulateAttributeStatus() *SmartNvmeAttribute {
//-1 is a special number meaning no threshold.
if sa.Threshold != -1 {
if smartMetadata, ok := thresholds.NmveMetadata[sa.AttributeId]; ok {
//check what the ideal is. Ideal tells us if our recorded value needs to be above or below the threshold
// check what the ideal is. Ideal tells us if our recorded value needs to be above or below the threshold
if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedScrutiny)
@ -82,7 +82,7 @@ func (sa *SmartNvmeAttribute) PopulateAttributeStatus() *SmartNvmeAttribute {
}
}
}
//TODO: eventually figure out the critical_warning bits and determine correct error messages here.
// TODO: eventually figure out the critical_warning bits and determine correct error messages here.
return sa
}

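PopulateAttributeStatus compares the recorded value against its threshold in the direction given by the metadata's ideal ("low" or "high"), and a threshold of -1 disables the check. A sketch of that comparison with hypothetical values.

    package main

    import "fmt"

    // exceedsThreshold reports whether a recorded value violates its threshold given
    // the metadata's "ideal" direction, following the comparison used by
    // PopulateAttributeStatus above. A threshold of -1 means no threshold is defined.
    func exceedsThreshold(ideal string, value, threshold int64) bool {
        if threshold == -1 {
            return false
        }
        return (ideal == "low" && value > threshold) ||
            (ideal == "high" && value < threshold)
    }

    func main() {
        // Hypothetical NVMe-style attributes: critical_warning should stay low,
        // available_spare should stay high.
        fmt.Println(exceedsThreshold("low", 1, 0))     // true  -> failed
        fmt.Println(exceedsThreshold("high", 3, 10))   // true  -> failed
        fmt.Println(exceedsThreshold("high", 100, 10)) // false -> ok
        fmt.Println(exceedsThreshold("low", 5, -1))    // false -> no threshold defined
    }
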
@ -9,7 +9,7 @@ import (
)
type SmartScsiAttribute struct {
AttributeId string `json:"attribute_id"` //json string from smartctl
AttributeId string `json:"attribute_id"` // json string from smartctl
Value int64 `json:"value"`
Threshold int64 `json:"thresh"`
@ -33,13 +33,14 @@ func (sa *SmartScsiAttribute) Flatten() map[string]interface{} {
fmt.Sprintf("attr.%s.value", sa.AttributeId): sa.Value,
fmt.Sprintf("attr.%s.thresh", sa.AttributeId): sa.Threshold,
//Generated Data
// Generated Data
fmt.Sprintf("attr.%s.transformed_value", sa.AttributeId): sa.TransformedValue,
fmt.Sprintf("attr.%s.status", sa.AttributeId): int64(sa.Status),
fmt.Sprintf("attr.%s.status_reason", sa.AttributeId): sa.StatusReason,
fmt.Sprintf("attr.%s.failure_rate", sa.AttributeId): sa.FailureRate,
}
}
func (sa *SmartScsiAttribute) Inflate(key string, val interface{}) {
if val == nil {
return
@ -55,7 +56,7 @@ func (sa *SmartScsiAttribute) Inflate(key string, val interface{}) {
case "thresh":
sa.Threshold = val.(int64)
//generated
// generated
case "transformed_value":
sa.TransformedValue = val.(int64)
case "status":
@ -67,15 +68,13 @@ func (sa *SmartScsiAttribute) Inflate(key string, val interface{}) {
}
}
//
//populate attribute status, using SMART Thresholds & Observed Metadata
//Chainable
// populate attribute status, using SMART Thresholds & Observed Metadata
// Chainable
func (sa *SmartScsiAttribute) PopulateAttributeStatus() *SmartScsiAttribute {
//-1 is a special number meaning no threshold.
if sa.Threshold != -1 {
if smartMetadata, ok := thresholds.NmveMetadata[sa.AttributeId]; ok {
//check what the ideal is. Ideal tells us if our recorded value needs to be above or below the threshold
// check what the ideal is. Ideal tells us if our recorded value needs to be above or below the threshold
if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedScrutiny)

@ -2,18 +2,19 @@ package measurements_test
import (
"encoding/json"
"github.com/analogj/scrutiny/webapp/backend/pkg"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
"github.com/stretchr/testify/require"
"io/ioutil"
"os"
"testing"
"time"
"github.com/analogj/scrutiny/webapp/backend/pkg"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
"github.com/stretchr/testify/require"
)
func TestSmart_Flatten(t *testing.T) {
//setup
// setup
timeNow := time.Now()
smart := measurements.Smart{
Date: timeNow,
@ -26,16 +27,16 @@ func TestSmart_Flatten(t *testing.T) {
Status: 0,
}
//test
// test
tags, fields := smart.Flatten()
//assert
// assert
require.Equal(t, map[string]string{"device_protocol": "ATA", "device_wwn": "test-wwn"}, tags)
require.Equal(t, map[string]interface{}{"power_cycle_count": int64(10), "power_on_hours": int64(10), "temp": int64(50)}, fields)
}
func TestSmart_Flatten_ATA(t *testing.T) {
//setup
// setup
timeNow := time.Now()
smart := measurements.Smart{
Date: timeNow,
@ -67,10 +68,10 @@ func TestSmart_Flatten_ATA(t *testing.T) {
},
}
//test
// test
tags, fields := smart.Flatten()
//assert
// assert
require.Equal(t, map[string]string{"device_protocol": "ATA", "device_wwn": "test-wwn"}, tags)
require.Equal(t, map[string]interface{}{
"attr.1.attribute_id": "1",
@ -104,7 +105,7 @@ func TestSmart_Flatten_ATA(t *testing.T) {
}
func TestSmart_Flatten_SCSI(t *testing.T) {
//setup
// setup
timeNow := time.Now()
smart := measurements.Smart{
Date: timeNow,
@ -122,10 +123,10 @@ func TestSmart_Flatten_SCSI(t *testing.T) {
},
}
//test
// test
tags, fields := smart.Flatten()
//assert
// assert
require.Equal(t, map[string]string{"device_protocol": "SCSI", "device_wwn": "test-wwn"}, tags)
require.Equal(t, map[string]interface{}{
"attr.read_errors_corrected_by_eccfast.attribute_id": "read_errors_corrected_by_eccfast",
@ -137,12 +138,13 @@ func TestSmart_Flatten_SCSI(t *testing.T) {
"attr.read_errors_corrected_by_eccfast.value": int64(300357663),
"power_cycle_count": int64(10),
"power_on_hours": int64(10),
"temp": int64(50)},
"temp": int64(50),
},
fields)
}
func TestSmart_Flatten_NVMe(t *testing.T) {
//setup
// setup
timeNow := time.Now()
smart := measurements.Smart{
Date: timeNow,
@ -160,10 +162,10 @@ func TestSmart_Flatten_NVMe(t *testing.T) {
},
}
//test
// test
tags, fields := smart.Flatten()
//assert
// assert
require.Equal(t, map[string]string{"device_protocol": "NVMe", "device_wwn": "test-wwn"}, tags)
require.Equal(t, map[string]interface{}{
"attr.available_spare.attribute_id": "available_spare",
@ -175,11 +177,12 @@ func TestSmart_Flatten_NVMe(t *testing.T) {
"attr.available_spare.value": int64(100),
"power_cycle_count": int64(10),
"power_on_hours": int64(10),
"temp": int64(50)}, fields)
"temp": int64(50),
}, fields)
}
func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
//setup
// setup
timeNow := time.Now()
attrs := map[string]interface{}{
"_time": timeNow,
@ -201,10 +204,10 @@ func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
"temp": int64(50),
}
//test
// test
smart, err := measurements.NewSmartFromInfluxDB(attrs)
//assert
// assert
require.NoError(t, err)
require.Equal(t, &measurements.Smart{
Date: timeNow,
@ -223,11 +226,12 @@ func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
RawString: "108",
WhenFailed: "",
},
}, Status: 0}, smart)
}, Status: 0,
}, smart)
}
func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
//setup
// setup
timeNow := time.Now()
attrs := map[string]interface{}{
"_time": timeNow,
@ -245,10 +249,10 @@ func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
"temp": int64(50),
}
//test
// test
smart, err := measurements.NewSmartFromInfluxDB(attrs)
//assert
// assert
require.NoError(t, err)
require.Equal(t, &measurements.Smart{
Date: timeNow,
@ -262,11 +266,12 @@ func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
AttributeId: "available_spare",
Value: int64(100),
},
}, Status: 0}, smart)
}, Status: 0,
}, smart)
}
func TestNewSmartFromInfluxDB_SCSI(t *testing.T) {
//setup
// setup
timeNow := time.Now()
attrs := map[string]interface{}{
"_time": timeNow,
@ -284,10 +289,10 @@ func TestNewSmartFromInfluxDB_SCSI(t *testing.T) {
"temp": int64(50),
}
//test
// test
smart, err := measurements.NewSmartFromInfluxDB(attrs)
//assert
// assert
require.NoError(t, err)
require.Equal(t, &measurements.Smart{
Date: timeNow,
@ -301,11 +306,12 @@ func TestNewSmartFromInfluxDB_SCSI(t *testing.T) {
AttributeId: "read_errors_corrected_by_eccfast",
Value: int64(300357663),
},
}, Status: 0}, smart)
}, Status: 0,
}, smart)
}
func TestFromCollectorSmartInfo(t *testing.T) {
//setup
// setup
smartDataFile, err := os.Open("../testdata/smart-ata.json")
require.NoError(t, err)
defer smartDataFile.Close()
@ -317,27 +323,26 @@ func TestFromCollectorSmartInfo(t *testing.T) {
err = json.Unmarshal(smartDataBytes, &smartJson)
require.NoError(t, err)
//test
// test
smartMdl := measurements.Smart{}
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
require.Equal(t, 18, len(smartMdl.Attributes))
//check that temperature was correctly parsed
// check that temperature was correctly parsed
require.Equal(t, int64(163210330144), smartMdl.Attributes["194"].(*measurements.SmartAtaAttribute).RawValue)
require.Equal(t, int64(32), smartMdl.Attributes["194"].(*measurements.SmartAtaAttribute).TransformedValue)
//ensure that Scrutiny warning for a non critical attribute does not set device status to failed.
// ensure that Scrutiny warning for a non critical attribute does not set device status to failed.
require.Equal(t, pkg.AttributeStatusWarningScrutiny, smartMdl.Attributes["3"].GetStatus())
}
func TestFromCollectorSmartInfo_Fail_Smart(t *testing.T) {
//setup
// setup
smartDataFile, err := os.Open("../testdata/smart-fail.json")
require.NoError(t, err)
defer smartDataFile.Close()
@ -349,11 +354,11 @@ func TestFromCollectorSmartInfo_Fail_Smart(t *testing.T) {
err = json.Unmarshal(smartDataBytes, &smartJson)
require.NoError(t, err)
//test
// test
smartMdl := measurements.Smart{}
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
require.Equal(t, pkg.DeviceStatusFailedSmart, smartMdl.Status)
@ -361,7 +366,7 @@ func TestFromCollectorSmartInfo_Fail_Smart(t *testing.T) {
}
func TestFromCollectorSmartInfo_Fail_ScrutinySmart(t *testing.T) {
//setup
// setup
smartDataFile, err := os.Open("../testdata/smart-fail2.json")
require.NoError(t, err)
defer smartDataFile.Close()
@ -373,11 +378,11 @@ func TestFromCollectorSmartInfo_Fail_ScrutinySmart(t *testing.T) {
err = json.Unmarshal(smartDataBytes, &smartJson)
require.NoError(t, err)
//test
// test
smartMdl := measurements.Smart{}
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
require.Equal(t, pkg.DeviceStatusFailedScrutiny|pkg.DeviceStatusFailedSmart, smartMdl.Status)
@ -385,7 +390,7 @@ func TestFromCollectorSmartInfo_Fail_ScrutinySmart(t *testing.T) {
}
func TestFromCollectorSmartInfo_Fail_ScrutinyNonCriticalFailed(t *testing.T) {
//setup
// setup
smartDataFile, err := os.Open("../testdata/smart-ata-failed-scrutiny.json")
require.NoError(t, err)
defer smartDataFile.Close()
@ -397,11 +402,11 @@ func TestFromCollectorSmartInfo_Fail_ScrutinyNonCriticalFailed(t *testing.T) {
err = json.Unmarshal(smartDataBytes, &smartJson)
require.NoError(t, err)
//test
// test
smartMdl := measurements.Smart{}
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
@ -414,11 +419,10 @@ func TestFromCollectorSmartInfo_Fail_ScrutinyNonCriticalFailed(t *testing.T) {
require.Equal(t, 14, len(smartMdl.Attributes))
}
//TODO: Scrutiny Warn
//TODO: Smart + Scrutiny Warn
// TODO: Scrutiny Warn
// TODO: Smart + Scrutiny Warn
func TestFromCollectorSmartInfo_NVMe_Fail_Scrutiny(t *testing.T) {
//setup
// setup
smartDataFile, err := os.Open("../testdata/smart-nvme-failed.json")
require.NoError(t, err)
defer smartDataFile.Close()
@ -430,11 +434,11 @@ func TestFromCollectorSmartInfo_NVMe_Fail_Scrutiny(t *testing.T) {
err = json.Unmarshal(smartDataBytes, &smartJson)
require.NoError(t, err)
//test
// test
smartMdl := measurements.Smart{}
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
@ -449,7 +453,7 @@ func TestFromCollectorSmartInfo_NVMe_Fail_Scrutiny(t *testing.T) {
}
func TestFromCollectorSmartInfo_Nvme(t *testing.T) {
//setup
// setup
smartDataFile, err := os.Open("../testdata/smart-nvme.json")
require.NoError(t, err)
defer smartDataFile.Close()
@ -461,11 +465,11 @@ func TestFromCollectorSmartInfo_Nvme(t *testing.T) {
err = json.Unmarshal(smartDataBytes, &smartJson)
require.NoError(t, err)
//test
// test
smartMdl := measurements.Smart{}
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
@ -476,7 +480,7 @@ func TestFromCollectorSmartInfo_Nvme(t *testing.T) {
}
func TestFromCollectorSmartInfo_Scsi(t *testing.T) {
//setup
// setup
smartDataFile, err := os.Open("../testdata/smart-scsi.json")
require.NoError(t, err)
defer smartDataFile.Close()
@ -488,16 +492,16 @@ func TestFromCollectorSmartInfo_Scsi(t *testing.T) {
err = json.Unmarshal(smartDataBytes, &smartJson)
require.NoError(t, err)
//test
// test
smartMdl := measurements.Smart{}
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
//assert
// assert
require.NoError(t, err)
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
require.Equal(t, 13, len(smartMdl.Attributes))
require.Equal(t, int64(56), smartMdl.Attributes["scsi_grown_defect_list"].(*measurements.SmartScsiAttribute).Value)
require.Equal(t, int64(300357663), smartMdl.Attributes["read_errors_corrected_by_eccfast"].(*measurements.SmartScsiAttribute).Value) //total_errors_corrected
require.Equal(t, int64(300357663), smartMdl.Attributes["read_errors_corrected_by_eccfast"].(*measurements.SmartScsiAttribute).Value) // total_errors_corrected
}
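The tests above exercise Smart.Flatten(), which splits a measurement into InfluxDB-style tags (device identity) and fields (numeric readings), with per-attribute values namespaced under an "attr.<id>." prefix. A minimal sketch of that shape, using simplified stand-in types rather than the project's models:

package main

import "fmt"

// smartSample is a simplified stand-in for the measurement model; only the
// tags/fields split demonstrated by the tests above is reproduced here.
type smartSample struct {
	DeviceWWN      string
	DeviceProtocol string
	Temp           int64
	Attributes     map[string]int64
}

func (s smartSample) Flatten() (map[string]string, map[string]interface{}) {
	tags := map[string]string{
		"device_wwn":      s.DeviceWWN,
		"device_protocol": s.DeviceProtocol,
	}
	fields := map[string]interface{}{"temp": s.Temp}
	for id, v := range s.Attributes {
		// per-attribute values are namespaced under an "attr.<id>." prefix
		fields[fmt.Sprintf("attr.%s.value", id)] = v
	}
	return tags, fields
}

func main() {
	tags, fields := smartSample{
		DeviceWWN:      "test-wwn",
		DeviceProtocol: "NVMe",
		Temp:           50,
		Attributes:     map[string]int64{"available_spare": 100},
	}.Flatten()
	fmt.Println(tags)   // map[device_protocol:NVMe device_wwn:test-wwn]
	fmt.Println(fields) // map[attr.available_spare.value:100 temp:50]
}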

@ -6,7 +6,7 @@ import (
// SettingEntry matches a setting row in the database
type SettingEntry struct {
//GORM attributes, see: http://gorm.io/docs/conventions.html
// GORM attributes, see: http://gorm.io/docs/conventions.html
gorm.Model
SettingKeyName string `json:"setting_key_name" gorm:"unique;not null"`

@ -26,10 +26,12 @@ import (
"golang.org/x/sync/errgroup"
)
const NotifyFailureTypeEmailTest = "EmailTest"
const NotifyFailureTypeBothFailure = "SmartFailure" //SmartFailure always takes precedence when Scrutiny & Smart failed.
const NotifyFailureTypeSmartFailure = "SmartFailure"
const NotifyFailureTypeScrutinyFailure = "ScrutinyFailure"
const (
NotifyFailureTypeEmailTest = "EmailTest"
NotifyFailureTypeBothFailure = "SmartFailure" // SmartFailure always takes precedence when Scrutiny & Smart failed.
NotifyFailureTypeSmartFailure = "SmartFailure"
NotifyFailureTypeScrutinyFailure = "ScrutinyFailure"
)
// ShouldNotify checks if the error message should be filtered (level mismatch or filtered_attributes)
func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs measurements.Smart, statusThreshold pkg.MetricsStatusThreshold, statusFilterAttributes pkg.MetricsStatusFilterAttributes, repeatNotifications bool, c *gin.Context, deviceRepo database.DeviceRepo) bool {
@ -38,7 +40,7 @@ func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs me
return false
}
//TODO: cannot check for warning notifyLevel yet.
// TODO: cannot check for warning notifyLevel yet.
// setup constants for comparison
var requiredDeviceStatus pkg.DeviceStatus
@ -48,7 +50,7 @@ func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs me
requiredDeviceStatus = pkg.DeviceStatusSet(pkg.DeviceStatusFailedSmart, pkg.DeviceStatusFailedScrutiny)
requiredAttrStatus = pkg.AttributeStatusSet(pkg.AttributeStatusFailedSmart, pkg.AttributeStatusFailedScrutiny)
} else if statusThreshold == pkg.MetricsStatusThresholdSmart {
//only smart failures
// only smart failures
requiredDeviceStatus = pkg.DeviceStatusFailedSmart
requiredAttrStatus = pkg.AttributeStatusFailedSmart
} else {
@ -79,7 +81,7 @@ func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs me
} else if device.IsNvme() {
critical = thresholds.NmveMetadata[attrId].Critical
} else {
//this is ATA
// this is ATA
attrIdInt, err := strconv.Atoi(attrId)
if err != nil {
continue
@ -123,15 +125,15 @@ func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs me
// TODO: include user label for device.
type Payload struct {
HostId string `json:"host_id,omitempty"` //host id (optional)
DeviceType string `json:"device_type"` //ATA/SCSI/NVMe
DeviceName string `json:"device_name"` //dev/sda
DeviceSerial string `json:"device_serial"` //WDDJ324KSO
HostId string `json:"host_id,omitempty"` // host id (optional)
DeviceType string `json:"device_type"` // ATA/SCSI/NVMe
DeviceName string `json:"device_name"` // dev/sda
DeviceSerial string `json:"device_serial"` // WDDJ324KSO
Test bool `json:"test"` // false
//private, populated during init (marked as Public for JSON serialization)
Date string `json:"date"` //populated by Send function.
FailureType string `json:"failure_type"` //EmailTest, BothFail, SmartFail, ScrutinyFail
// private, populated during init (marked as Public for JSON serialization)
Date string `json:"date"` // populated by Send function.
FailureType string `json:"failure_type"` // EmailTest, BothFail, SmartFail, ScrutinyFail
Subject string `json:"subject"`
Message string `json:"message"`
}
@ -145,7 +147,7 @@ func NewPayload(device models.Device, test bool, currentTime ...time.Time) Paylo
Test: test,
}
//validate that the Payload is populated
// validate that the Payload is populated
var sendDate time.Time
if currentTime != nil && len(currentTime) > 0 {
sendDate = currentTime[0]
@ -161,21 +163,21 @@ func NewPayload(device models.Device, test bool, currentTime ...time.Time) Paylo
}
func (p *Payload) GenerateFailureType(deviceStatus pkg.DeviceStatus) string {
//generate a failure type, given Test and DeviceStatus
// generate a failure type, given Test and DeviceStatus
if p.Test {
return NotifyFailureTypeEmailTest // must be an email test if "Test" is true
}
if pkg.DeviceStatusHas(deviceStatus, pkg.DeviceStatusFailedSmart) && pkg.DeviceStatusHas(deviceStatus, pkg.DeviceStatusFailedScrutiny) {
return NotifyFailureTypeBothFailure //both failed
return NotifyFailureTypeBothFailure // both failed
} else if pkg.DeviceStatusHas(deviceStatus, pkg.DeviceStatusFailedSmart) {
return NotifyFailureTypeSmartFailure //only SMART failed
return NotifyFailureTypeSmartFailure // only SMART failed
} else {
return NotifyFailureTypeScrutinyFailure //only Scrutiny failed
return NotifyFailureTypeScrutinyFailure // only Scrutiny failed
}
}
func (p *Payload) GenerateSubject() string {
//generate a detailed failure message
// generate a detailed failure message
var subject string
if len(p.HostId) > 0 {
subject = fmt.Sprintf("Scrutiny SMART error (%s) detected on [host]device: [%s]%s", p.FailureType, p.HostId, p.DeviceName)
@ -186,7 +188,7 @@ func (p *Payload) GenerateSubject() string {
}
func (p *Payload) GenerateMessage() string {
//generate a detailed failure message
// generate a detailed failure message
messageParts := []string{}
@ -226,8 +228,7 @@ type Notify struct {
}
func (n *Notify) Send() error {
//retrieve list of notification endpoints from config file
// retrieve list of notification endpoints from config file
configUrls := n.Config.GetStringSlice("notify.urls")
n.Logger.Debugf("Configured notification services: %v", configUrls)
@ -236,7 +237,7 @@ func (n *Notify) Send() error {
return nil
}
//remove http:// https:// and script:// prefixed urls
// remove http:// https:// and script:// prefixed urls
notifyWebhooks := []string{}
notifyScripts := []string{}
notifyShoutrrr := []string{}
@ -255,8 +256,8 @@ func (n *Notify) Send() error {
n.Logger.Debugf("Configured webhooks: %v", notifyWebhooks)
n.Logger.Debugf("Configured shoutrrr: %v", notifyShoutrrr)
//run all scripts, webhooks and shoutrrr commands in parallel
//var wg sync.WaitGroup
// run all scripts, webhooks and shoutrrr commands in parallel
// var wg sync.WaitGroup
var eg errgroup.Group
for _, url := range notifyWebhooks {
@ -275,7 +276,7 @@ func (n *Notify) Send() error {
eg.Go(func() error { return n.SendShoutrrrNotification(_url) })
}
//and wait for completion, error or timeout.
// and wait for completion, error or timeout.
n.Logger.Debugf("Main: waiting for notifications to complete.")
if err := eg.Wait(); err == nil {
@ -307,12 +308,12 @@ func (n *Notify) SendWebhookNotification(webhookUrl string) error {
return err
}
defer resp.Body.Close()
//we don't care about resp body content, but maybe we should log it?
// we don't care about resp body content, but maybe we should log it?
return nil
}
func (n *Notify) SendScriptNotification(scriptUrl string) error {
//check if the script exists.
// check if the script exists.
scriptPath := strings.TrimPrefix(scriptUrl, "script://")
n.Logger.Infof("Executing Script %s", scriptPath)
@ -341,7 +342,6 @@ func (n *Notify) SendScriptNotification(scriptUrl string) error {
}
func (n *Notify) SendShoutrrrNotification(shoutrrrUrl string) error {
fmt.Printf("Sending Notifications to %v", shoutrrrUrl)
n.Logger.Infof("Sending notifications to %v", shoutrrrUrl)
@ -351,7 +351,7 @@ func (n *Notify) SendShoutrrrNotification(shoutrrrUrl string) error {
return err
}
//sender.SetLogger(n.Logger.)
// sender.SetLogger(n.Logger.)
serviceName, params, err := n.GenShoutrrrNotificationParams(shoutrrrUrl)
n.Logger.Debugf("notification data for %s: (%s)\n%v", serviceName, shoutrrrUrl, params)
@ -370,7 +370,7 @@ func (n *Notify) SendShoutrrrNotification(shoutrrrUrl string) error {
}
errstrings = append(errstrings, err.Error())
}
//sometimes there are empty errs, we're going to skip them.
// sometimes there are empty errs, we're going to skip them.
if len(errstrings) == 0 {
return nil
} else {
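The Send method above fans the configured webhook, script, and shoutrrr targets out to goroutines with errgroup and then waits for the whole batch. A minimal sketch of that pattern, with a hypothetical sendTo helper and placeholder targets:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// sendTo is a hypothetical stand-in for the per-target send functions
// (webhook, script, shoutrrr) used by the real Send method.
func sendTo(target string) error {
	fmt.Println("notifying", target)
	return nil
}

func main() {
	targets := []string{"https://example.com/hook", "script:///opt/notify.sh"}

	var eg errgroup.Group
	for _, t := range targets {
		t := t // capture the loop variable for the goroutine (pre-Go 1.22 semantics)
		eg.Go(func() error { return sendTo(t) })
	}

	// wait for completion; Wait returns the first non-nil error, if any
	if err := eg.Wait(); err != nil {
		fmt.Println("at least one notification failed:", err)
	}
}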

@ -19,7 +19,7 @@ import (
func TestShouldNotify_MustSkipPassingDevices(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusPassed,
}
@ -30,13 +30,13 @@ func TestShouldNotify_MustSkipPassingDevices(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusThresholdBoth_FailingSmartDevice(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -46,13 +46,13 @@ func TestShouldNotify_MetricsStatusThresholdBoth_FailingSmartDevice(t *testing.T
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusThresholdSmart_FailingSmartDevice(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -62,13 +62,13 @@ func TestShouldNotify_MetricsStatusThresholdSmart_FailingSmartDevice(t *testing.
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusThresholdScrutiny_FailingSmartDevice(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -78,13 +78,13 @@ func TestShouldNotify_MetricsStatusThresholdScrutiny_FailingSmartDevice(t *testi
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithCriticalAttrs(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -99,13 +99,13 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithCriticalAttrs(t
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithMultipleCriticalAttrs(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -123,13 +123,13 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithMultipleCritical
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoCriticalAttrs(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -144,13 +144,13 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoCriticalAttrs(
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoFailingCriticalAttrs(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -165,13 +165,13 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoFailingCritica
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_MetricsStatusFilterAttributesCritical_MetricsStatusThresholdSmart_WithCriticalAttrsFailingScrutiny(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedSmart,
}
@ -189,12 +189,13 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_MetricsStatusThresho
defer mockCtrl.Finish()
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
//assert
// assert
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_NoRepeat_DatabaseFailure(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedScrutiny,
}
@ -210,13 +211,13 @@ func TestShouldNotify_NoRepeat_DatabaseFailure(t *testing.T) {
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, "", database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{}, errors.New("")).Times(1)
//assert
// assert
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_NoRepeat_NoDatabaseData(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedScrutiny,
}
@ -232,12 +233,13 @@ func TestShouldNotify_NoRepeat_NoDatabaseData(t *testing.T) {
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, "", database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{}, nil).Times(1)
//assert
// assert
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
}
func TestShouldNotify_NoRepeat(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
DeviceStatus: pkg.DeviceStatusFailedScrutiny,
}
@ -254,14 +256,14 @@ func TestShouldNotify_NoRepeat(t *testing.T) {
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, "", database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{smartAttrs}, nil).Times(1)
//assert
// assert
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
}
func TestNewPayload(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
SerialNumber: "FAKEWDDJ324KSO",
DeviceType: pkg.DeviceProtocolAta,
@ -269,11 +271,11 @@ func TestNewPayload(t *testing.T) {
DeviceStatus: pkg.DeviceStatusFailedScrutiny,
}
currentTime := time.Now()
//test
// test
payload := NewPayload(device, false, currentTime)
//assert
// assert
require.Equal(t, "Scrutiny SMART error (ScrutinyFailure) detected on device: /dev/sda", payload.Subject)
require.Equal(t, fmt.Sprintf(`Scrutiny SMART error notification for device: /dev/sda
Failure Type: ScrutinyFailure
@ -287,7 +289,7 @@ Date: %s`, currentTime.Format(time.RFC3339)), payload.Message)
func TestNewPayload_TestMode(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
SerialNumber: "FAKEWDDJ324KSO",
DeviceType: pkg.DeviceProtocolAta,
@ -295,11 +297,11 @@ func TestNewPayload_TestMode(t *testing.T) {
DeviceStatus: pkg.DeviceStatusFailedScrutiny,
}
currentTime := time.Now()
//test
// test
payload := NewPayload(device, true, currentTime)
//assert
// assert
require.Equal(t, "Scrutiny SMART error (EmailTest) detected on device: /dev/sda", payload.Subject)
require.Equal(t, fmt.Sprintf(`TEST NOTIFICATION:
Scrutiny SMART error notification for device: /dev/sda
@ -314,7 +316,7 @@ Date: %s`, currentTime.Format(time.RFC3339)), payload.Message)
func TestNewPayload_WithHostId(t *testing.T) {
t.Parallel()
//setup
// setup
device := models.Device{
SerialNumber: "FAKEWDDJ324KSO",
DeviceType: pkg.DeviceProtocolAta,
@ -323,11 +325,11 @@ func TestNewPayload_WithHostId(t *testing.T) {
HostId: "custom-host",
}
currentTime := time.Now()
//test
// test
payload := NewPayload(device, false, currentTime)
//assert
// assert
require.Equal(t, "Scrutiny SMART error (ScrutinyFailure) detected on [host]device: [custom-host]/dev/sda", payload.Subject)
require.Equal(t, fmt.Sprintf(`Scrutiny SMART error notification for device: /dev/sda
Host Id: custom-host

@ -5,9 +5,11 @@ import (
"strings"
)
const AtaSmartAttributeDisplayTypeRaw = "raw"
const AtaSmartAttributeDisplayTypeNormalized = "normalized"
const AtaSmartAttributeDisplayTypeTransformed = "transformed"
const (
AtaSmartAttributeDisplayTypeRaw = "raw"
AtaSmartAttributeDisplayTypeNormalized = "normalized"
AtaSmartAttributeDisplayTypeTransformed = "transformed"
)
type AtaAttributeMetadata struct {
ID int64 `json:"-"`
@ -16,20 +18,22 @@ type AtaAttributeMetadata struct {
Critical bool `json:"critical"`
Description string `json:"description"`
Transform func(int64, int64, string) int64 `json:"-"` //this should be a method to extract/transform the normalized or raw data to a chartable format. Str
Transform func(int64, int64, string) int64 `json:"-"` // this should be a method to extract/transform the normalized or raw data to a chartable format. Str
TransformValueUnit string `json:"transform_value_unit,omitempty"`
ObservedThresholds []ObservedThreshold `json:"observed_thresholds,omitempty"` //these thresholds must match the DisplayType
ObservedThresholds []ObservedThreshold `json:"observed_thresholds,omitempty"` // these thresholds must match the DisplayType
DisplayType string `json:"display_type"` //"raw" "normalized" or "transformed"
}
const ObservedThresholdIdealLow = "low"
const ObservedThresholdIdealHigh = "high"
const (
ObservedThresholdIdealLow = "low"
ObservedThresholdIdealHigh = "high"
)
type ObservedThreshold struct {
Low int64 `json:"low"` //threshold (raw/normalized data) boundary low value
High int64 `json:"high"` //threshold (raw/normalized data) boundary high value
Low int64 `json:"low"` // threshold (raw/normalized data) boundary low value
High int64 `json:"high"` // threshold (raw/normalized data) boundary high value
AnnualFailureRate float64 `json:"annual_failure_rate"` //error rate %
AnnualFailureRate float64 `json:"annual_failure_rate"` // error rate %
ErrorInterval []float64 `json:"error_interval"`
}
@ -255,7 +259,6 @@ var AtaMetadata = map[int]AtaAttributeMetadata{
Description: "Average performance of seek operations of the magnetic heads. If this attribute is decreasing, it is a sign of problems in the mechanical subsystem.",
},
9: {
ID: 9,
DisplayName: "Power-On Hours",
DisplayType: AtaSmartAttributeDisplayTypeNormalized,
@ -277,7 +280,7 @@ var AtaMetadata = map[int]AtaAttributeMetadata{
AnnualFailureRate: 0.05459827163896099,
ErrorInterval: []float64{0.05113785787727033, 0.05823122757702782},
},
{ //TODO: using fake data from attribute 11. Not enough data, but critical and correlated with failure.
{ // TODO: using fake data from attribute 11. Not enough data, but critical and correlated with failure.
Low: 0,
High: 80,
AnnualFailureRate: 0.5555555555555556,
@ -671,7 +674,7 @@ var AtaMetadata = map[int]AtaAttributeMetadata{
Ideal: ObservedThresholdIdealLow,
Critical: true,
Description: "The count of aborted operations due to HDD timeout. Normally this attribute value should be equal to zero.",
Transform: func(normValue int64, rawValue int64, rawString string) int64 {
Transform: func(normValue, rawValue int64, rawString string) int64 {
// Parse Seagate command timeout values if the string contains 3 pieces
// and each piece is less than or equal to the next (as a sanity check)
// See https://github.com/AnalogJ/scrutiny/issues/522
@ -897,7 +900,7 @@ var AtaMetadata = map[int]AtaAttributeMetadata{
Ideal: ObservedThresholdIdealLow,
Critical: false,
Description: "Indicates the device temperature, if the appropriate sensor is fitted. Lowest byte of the raw value contains the exact temperature value (Celsius degrees).",
Transform: func(normValue int64, rawValue int64, rawString string) int64 {
Transform: func(normValue, rawValue int64, rawString string) int64 {
return rawValue & 0b11111111
},
TransformValueUnit: "°C",
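The Transform for attribute 194 above extracts the device temperature from the lowest byte of the raw SMART value. A minimal sketch of that decoding, reusing the raw value asserted in the earlier test:

package main

import "fmt"

// tempCelsius mirrors the transform above: the exact temperature in °C is
// stored in the lowest byte of the raw SMART value for attribute 194.
func tempCelsius(rawValue int64) int64 {
	return rawValue & 0xFF // same mask as rawValue & 0b11111111
}

func main() {
	// 163210330144 is the raw value asserted in the ATA test above;
	// its lowest byte decodes to 32°C.
	fmt.Println(tempCelsius(163210330144)) // 32
}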

@ -11,7 +11,7 @@ type NvmeAttributeMetadata struct {
Critical bool `json:"critical"`
Description string `json:"description"`
Transform func(int64, int64, string) int64 `json:"-"` //this should be a method to extract/transform the normalized or raw data to a chartable format. Str
Transform func(int64, int64, string) int64 `json:"-"` // this should be a method to extract/transform the normalized or raw data to a chartable format. Str
TransformValueUnit string `json:"transform_value_unit,omitempty"`
DisplayType string `json:"display_type"` //"raw" "normalized" or "transformed"
}

@ -7,7 +7,7 @@ type ScsiAttributeMetadata struct {
Critical bool `json:"critical"`
Description string `json:"description"`
Transform func(int64, int64, string) int64 `json:"-"` //this should be a method to extract/transform the normalized or raw data to a chartable format. Str
Transform func(int64, int64, string) int64 `json:"-"` // this should be a method to extract/transform the normalized or raw data to a chartable format. Str
TransformValueUnit string `json:"transform_value_unit,omitempty"`
DisplayType string `json:"display_type"` //"raw" "normalized" or "transformed"
}

@ -1,10 +1,11 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
)
func DeleteDevice(c *gin.Context) {

@ -1,10 +1,11 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
)
func GetDevicesSummary(c *gin.Context) {
@ -18,7 +19,7 @@ func GetDevicesSummary(c *gin.Context) {
return
}
//this must match DeviceSummaryWrapper (webapp/backend/pkg/models/device_summary.go)
// this must match DeviceSummaryWrapper (webapp/backend/pkg/models/device_summary.go)
c.JSON(http.StatusOK, gin.H{
"success": true,
"data": map[string]interface{}{

@ -1,10 +1,11 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
)
func GetDevicesSummaryTempHistory(c *gin.Context) {

@ -1,10 +1,11 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
)
func GetSettings(c *gin.Context) {

@ -1,10 +1,11 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
)
func HealthCheck(c *gin.Context) {
@ -12,7 +13,7 @@ func HealthCheck(c *gin.Context) {
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
logger.Infof("Checking Influxdb & Sqlite health")
//check sqlite and influxdb health
// check sqlite and influxdb health
err := deviceRepo.HealthCheck(c)
if err != nil {
logger.Errorln("An error occurred during healthcheck", err)
@ -20,7 +21,7 @@ func HealthCheck(c *gin.Context) {
return
}
//TODO:
// TODO:
// check if the /web folder is populated.
c.JSON(http.StatusOK, gin.H{

@ -1,12 +1,13 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
"github.com/gin-gonic/gin"
"github.com/samber/lo"
"github.com/sirupsen/logrus"
"net/http"
)
// register devices that are detected by various collectors.
@ -23,14 +24,14 @@ func RegisterDevices(c *gin.Context) {
return
}
//filter any device with empty wwn (they are invalid)
// filter any device with empty wwn (they are invalid)
detectedStorageDevices := lo.Filter[models.Device](collectorDeviceWrapper.Data, func(dev models.Device, _ int) bool {
return len(dev.WWN) > 0
})
errs := []error{}
for _, dev := range detectedStorageDevices {
//insert devices into DB (and update specified columns if device is already registered)
// insert devices into DB (and update specified columns if device is already registered)
// update device fields that may change: (DeviceType, HostID)
if err := deviceRepo.RegisterDevice(c, dev); err != nil {
errs = append(errs, err)

@ -1,11 +1,12 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
)
func SaveSettings(c *gin.Context) {

@ -1,13 +1,14 @@
package handler
import (
"net/http"
"github.com/analogj/scrutiny/webapp/backend/pkg"
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
"github.com/analogj/scrutiny/webapp/backend/pkg/notify"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
)
// Send test notification

@ -14,13 +14,13 @@ import (
)
func UploadDeviceMetrics(c *gin.Context) {
//db := c.MustGet("DB").(*gorm.DB)
// db := c.MustGet("DB").(*gorm.DB)
logger := c.MustGet("LOGGER").(*logrus.Entry)
appConfig := c.MustGet("CONFIG").(config.Interface)
//influxWriteDb := c.MustGet("INFLUXDB_WRITE").(*api.WriteAPIBlocking)
// influxWriteDb := c.MustGet("INFLUXDB_WRITE").(*api.WriteAPIBlocking)
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
//appConfig := c.MustGet("CONFIG").(config.Interface)
// appConfig := c.MustGet("CONFIG").(config.Interface)
if c.Param("wwn") == "" {
c.JSON(http.StatusBadRequest, gin.H{"success": false})
@ -34,7 +34,7 @@ func UploadDeviceMetrics(c *gin.Context) {
return
}
//update the device information if necessary
// update the device information if necessary
updatedDevice, err := deviceRepo.UpdateDevice(c, c.Param("wwn"), collectorSmartData)
if err != nil {
logger.Errorln("An error occurred while updating device data from smartctl metrics:", err)
@ -51,7 +51,7 @@ func UploadDeviceMetrics(c *gin.Context) {
}
if smartData.Status != pkg.DeviceStatusPassed {
//there is a failure detected by Scrutiny, update the device status on the homepage.
// there is a failure detected by Scrutiny, update the device status on the homepage.
updatedDevice, err = deviceRepo.UpdateDeviceStatus(c, c.Param("wwn"), smartData.Status)
if err != nil {
logger.Errorln("An error occurred while updating device status", err)
@ -68,7 +68,7 @@ func UploadDeviceMetrics(c *gin.Context) {
return
}
//check for error
// check for error
if notify.ShouldNotify(
logger,
updatedDevice,
@ -79,7 +79,7 @@ func UploadDeviceMetrics(c *gin.Context) {
c,
deviceRepo,
) {
//send notifications
// send notifications
liveNotify := notify.New(
logger,
@ -87,7 +87,7 @@ func UploadDeviceMetrics(c *gin.Context) {
updatedDevice,
false,
)
_ = liveNotify.Send() //we ignore error message when sending notifications.
_ = liveNotify.Send() // we ignore error message when sending notifications.
}
c.JSON(http.StatusOK, gin.H{"success": true})

@ -3,5 +3,4 @@ package handler
import "github.com/gin-gonic/gin"
func UploadDeviceSelfTests(c *gin.Context) {
}

@ -3,8 +3,6 @@ package middleware
import (
"bytes"
"fmt"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"io"
"io/ioutil"
"math"
@ -12,6 +10,9 @@ import (
"os"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
)
// Middleware based on https://github.com/toorop/gin-logrus/blob/master/logger.go
@ -29,20 +30,18 @@ var timeFormat = "02/Jan/2006:15:04:05 -0700"
// Logger is the logrus logger handler
func LoggerMiddleware(logger *logrus.Entry) gin.HandlerFunc {
hostname, err := os.Hostname()
if err != nil {
hostname = "unknown"
}
return func(c *gin.Context) {
//clone the request body reader.
// clone the request body reader.
var reqBody string
if c.Request.Body != nil {
buf, _ := ioutil.ReadAll(c.Request.Body)
reqBodyReader1 := ioutil.NopCloser(bytes.NewBuffer(buf))
reqBodyReader2 := ioutil.NopCloser(bytes.NewBuffer(buf)) //We have to create a new Buffer, because reqBodyReader1 will be read.
reqBodyReader2 := ioutil.NopCloser(bytes.NewBuffer(buf)) // We have to create a new Buffer, because reqBodyReader1 will be read.
c.Request.Body = reqBodyReader2
reqBody = readBody(reqBodyReader1)
}
@ -90,7 +89,7 @@ func LoggerMiddleware(logger *logrus.Entry) gin.HandlerFunc {
}
}
if strings.Contains(path, "/api/") {
//only debug log request/response from api endpoint.
// only debug log request/response from api endpoint.
if len(reqBody) > 0 {
entry.WithField("bodyType", "request").Debugln(reqBody) // Print request body
}
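The middleware above buffers the request body and wraps it in two independent readers so it can be logged and still handed back to the request. A minimal sketch of that cloning trick, written with the newer io helpers (the original uses ioutil):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// the request body can only be read once, so buffer it and build two
	// independent readers: one for logging, one to hand back to the request
	body := io.NopCloser(strings.NewReader(`{"hello":"world"}`))
	buf, _ := io.ReadAll(body)

	logReader := io.NopCloser(bytes.NewBuffer(buf))
	passReader := io.NopCloser(bytes.NewBuffer(buf))

	logged, _ := io.ReadAll(logReader)
	fmt.Println("logged:", string(logged))

	forwarded, _ := io.ReadAll(passReader)
	fmt.Println("forwarded to handler:", string(forwarded))
}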

@ -2,6 +2,7 @@ package middleware
import (
"context"
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
"github.com/gin-gonic/gin"
@ -9,7 +10,6 @@ import (
)
func RepositoryMiddleware(appConfig config.Interface, globalLogger logrus.FieldLogger) gin.HandlerFunc {
deviceRepo, err := database.NewScrutinyRepository(appConfig, globalLogger)
if err != nil {
panic(err)
@ -21,9 +21,9 @@ func RepositoryMiddleware(appConfig config.Interface, globalLogger logrus.FieldL
panic(err)
}
//settings.UpdateSettingEntries()
// settings.UpdateSettingEntries()
//TODO: determine where we can call defer deviceRepo.Close()
// TODO: determine where we can call defer deviceRepo.Close()
return func(c *gin.Context) {
c.Set("DEVICE_REPOSITORY", deviceRepo)
c.Next()

@ -2,6 +2,10 @@ package web
import (
"fmt"
"net/http"
"path/filepath"
"strings"
"github.com/analogj/go-util/utils"
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
@ -9,9 +13,6 @@ import (
"github.com/analogj/scrutiny/webapp/backend/pkg/web/middleware"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"net/http"
"path/filepath"
"strings"
)
type AppEngine struct {
@ -35,30 +36,30 @@ func (ae *AppEngine) Setup(logger *logrus.Entry) *gin.Engine {
api := base.Group("/api")
{
api.GET("/health", handler.HealthCheck)
api.POST("/health/notify", handler.SendTestNotification) //check if notifications are configured correctly
api.POST("/health/notify", handler.SendTestNotification) // check if notifications are configured correctly
api.POST("/devices/register", handler.RegisterDevices) //used by Collector to register new devices and retrieve filtered list
api.GET("/summary", handler.GetDevicesSummary) //used by Dashboard
api.GET("/summary/temp", handler.GetDevicesSummaryTempHistory) //used by Dashboard (Temperature history dropdown)
api.POST("/device/:wwn/smart", handler.UploadDeviceMetrics) //used by Collector to upload data
api.POST("/devices/register", handler.RegisterDevices) // used by Collector to register new devices and retrieve filtered list
api.GET("/summary", handler.GetDevicesSummary) // used by Dashboard
api.GET("/summary/temp", handler.GetDevicesSummaryTempHistory) // used by Dashboard (Temperature history dropdown)
api.POST("/device/:wwn/smart", handler.UploadDeviceMetrics) // used by Collector to upload data
api.POST("/device/:wwn/selftest", handler.UploadDeviceSelfTests)
api.GET("/device/:wwn/details", handler.GetDeviceDetails) //used by Details
api.DELETE("/device/:wwn", handler.DeleteDevice) //used by UI to delete device
api.GET("/device/:wwn/details", handler.GetDeviceDetails) // used by Details
api.DELETE("/device/:wwn", handler.DeleteDevice) // used by UI to delete device
api.GET("/settings", handler.GetSettings) //used to get settings
api.POST("/settings", handler.SaveSettings) //used to save settings
api.GET("/settings", handler.GetSettings) // used to get settings
api.POST("/settings", handler.SaveSettings) // used to save settings
}
}
//Static request routing
// Static request routing
base.StaticFS("/web", http.Dir(ae.Config.GetString("web.src.frontend.path")))
//redirect base url to /web
// redirect base url to /web
base.GET("/", func(c *gin.Context) {
c.Redirect(http.StatusFound, basePath+"/web")
})
//catch-all, serve index page.
// catch-all, serve index page.
r.NoRoute(func(c *gin.Context) {
c.File(fmt.Sprintf("%s/index.html", ae.Config.GetString("web.src.frontend.path")))
})
@ -66,13 +67,13 @@ func (ae *AppEngine) Setup(logger *logrus.Entry) *gin.Engine {
}
func (ae *AppEngine) Start() error {
//set the gin mode
// set the gin mode
gin.SetMode(gin.ReleaseMode)
if strings.ToLower(ae.Config.GetString("log.level")) == "debug" {
gin.SetMode(gin.DebugMode)
}
//check if the database parent directory exists, fail here rather than in a handler.
// check if the database parent directory exists, fail here rather than in a handler.
if !utils.FileExists(filepath.Dir(ae.Config.GetString("web.database.location"))) {
return errors.ConfigValidationError(fmt.Sprintf(
"Database parent directory does not exist. Please check path (%s)",

@ -55,7 +55,7 @@ func helperReadSmartDataFileFixTimestamp(t *testing.T, smartDataFilepath string)
metricsFileData, err := ioutil.ReadAll(metricsfile)
require.NoError(t, err)
//unmarshal because we need to change the timestamp
// unmarshal because we need to change the timestamp
var smartData collector.SmartInfo
err = json.Unmarshal(metricsFileData, &smartData)
require.NoError(t, err)
@ -86,7 +86,7 @@ func TestServerTestSuite_WithCustomBasePath(t *testing.T) {
}
func (suite *ServerTestSuite) TestHealthRoute() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -119,18 +119,18 @@ func (suite *ServerTestSuite) TestHealthRoute() {
router := ae.Setup(logrus.WithField("test", suite.T().Name()))
//test
// test
w := httptest.NewRecorder()
req, _ := http.NewRequest("GET", suite.Basepath+"/api/health", nil)
router.ServeHTTP(w, req)
//assert
// assert
require.Equal(suite.T(), 200, w.Code)
require.Equal(suite.T(), "{\"success\":true}", w.Body.String())
}
func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -163,17 +163,17 @@ func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
file, err := os.Open("testdata/register-devices-req.json")
require.NoError(suite.T(), err)
//test
// test
w := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/devices/register", file)
router.ServeHTTP(w, req)
//assert
// assert
require.Equal(suite.T(), 200, w.Code)
}
func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -212,7 +212,7 @@ func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
metricsfile := helperReadSmartDataFileFixTimestamp(suite.T(), "testdata/upload-device-metrics-req.json")
//test
// test
wr := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/devices/register", devicesfile)
router.ServeHTTP(wr, req)
@ -223,11 +223,11 @@ func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
router.ServeHTTP(mr, req)
require.Equal(suite.T(), 200, mr.Code)
//assert
// assert
}
func (suite *ServerTestSuite) TestPopulateMultiple() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -235,7 +235,7 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
fakeConfig := mock_config.NewMockInterface(mockCtrl)
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
//fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return("testdata/scrutiny_test.db")
// fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return("testdata/scrutiny_test.db")
fakeConfig.EXPECT().GetStringSlice("notify.urls").Return([]string{}).AnyTimes()
fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
@ -272,7 +272,7 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
scsifile := helperReadSmartDataFileFixTimestamp(suite.T(), "../models/testdata/smart-scsi.json")
scsi2file := helperReadSmartDataFileFixTimestamp(suite.T(), "../models/testdata/smart-scsi2.json")
//test
// test
wr := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/devices/register", devicesfile)
router.ServeHTTP(wr, req)
@ -303,7 +303,7 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
router.ServeHTTP(s2r, req)
require.Equal(suite.T(), 200, s2r.Code)
//assert
// assert
}
//TODO: this test should use a recorded request/response playback.
@ -332,7 +332,7 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
//}
func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -368,17 +368,17 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
}
router := ae.Setup(logrus.WithField("test", suite.T().Name()))
//test
// test
wr := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/health/notify", strings.NewReader("{}"))
router.ServeHTTP(wr, req)
//assert
// assert
require.Equal(suite.T(), 500, wr.Code)
}
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -414,17 +414,17 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
}
router := ae.Setup(logrus.WithField("test", suite.T().Name()))
//test
// test
wr := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/health/notify", strings.NewReader("{}"))
router.ServeHTTP(wr, req)
//assert
// assert
require.Equal(suite.T(), 500, wr.Code)
}
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -460,17 +460,17 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
}
router := ae.Setup(logrus.WithField("test", suite.T().Name()))
//test
// test
wr := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/health/notify", strings.NewReader("{}"))
router.ServeHTTP(wr, req)
//assert
// assert
require.Equal(suite.T(), 200, wr.Code)
}
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -505,17 +505,17 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
}
router := ae.Setup(logrus.WithField("test", suite.T().Name()))
//test
// test
wr := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/health/notify", strings.NewReader("{}"))
router.ServeHTTP(wr, req)
//assert
// assert
require.Equal(suite.T(), 500, wr.Code)
}
func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
//setup
// setup
parentPath, _ := ioutil.TempDir("", "")
defer os.RemoveAll(parentPath)
mockCtrl := gomock.NewController(suite.T())
@ -556,7 +556,7 @@ func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
metricsfile := helperReadSmartDataFileFixTimestamp(suite.T(), "../models/testdata/smart-nvme2.json")
//test
// test
wr := httptest.NewRecorder()
req, _ := http.NewRequest("POST", suite.Basepath+"/api/devices/register", devicesfile)
router.ServeHTTP(wr, req)
@ -575,7 +575,7 @@ func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
err = json.Unmarshal(sr.Body.Bytes(), &deviceSummary)
require.NoError(suite.T(), err)
//assert
// assert
require.Equal(suite.T(), "a4c8e8ed-11a0-4c97-9bba-306440f1b944", deviceSummary.Data.Summary["a4c8e8ed-11a0-4c97-9bba-306440f1b944"].Device.WWN)
require.Equal(suite.T(), pkg.DeviceStatusPassed, deviceSummary.Data.Summary["a4c8e8ed-11a0-4c97-9bba-306440f1b944"].Device.DeviceStatus)
}
