Merge pull request #222 from AnalogJ/influxdb
commit
6a9db6a92b
@ -0,0 +1,5 @@
|
|||||||
|
package shell
|
||||||
|
|
||||||
|
func Create() Interface {
|
||||||
|
return new(localShell)
|
||||||
|
}
|
@ -0,0 +1,11 @@
|
|||||||
|
package shell
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create mock using:
|
||||||
|
// mockgen -source=collector/pkg/common/shell/interface.go -destination=collector/pkg/common/shell/mock/mock_shell.go
|
||||||
|
type Interface interface {
|
||||||
|
Command(logger *logrus.Entry, cmdName string, cmdArgs []string, workingDir string, environ []string) (string, error)
|
||||||
|
}
|
@ -0,0 +1,50 @@
|
|||||||
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
|
// Source: collector/pkg/common/shell/interface.go
|
||||||
|
|
||||||
|
// Package mock_shell is a generated GoMock package.
|
||||||
|
package mock_shell
|
||||||
|
|
||||||
|
import (
|
||||||
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
logrus "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MockInterface is a mock of Interface interface.
|
||||||
|
type MockInterface struct {
|
||||||
|
ctrl *gomock.Controller
|
||||||
|
recorder *MockInterfaceMockRecorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockInterfaceMockRecorder is the mock recorder for MockInterface.
|
||||||
|
type MockInterfaceMockRecorder struct {
|
||||||
|
mock *MockInterface
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMockInterface creates a new mock instance.
|
||||||
|
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
|
||||||
|
mock := &MockInterface{ctrl: ctrl}
|
||||||
|
mock.recorder = &MockInterfaceMockRecorder{mock}
|
||||||
|
return mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
|
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
|
||||||
|
return m.recorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command mocks base method.
|
||||||
|
func (m *MockInterface) Command(logger *logrus.Entry, cmdName string, cmdArgs []string, workingDir string, environ []string) (string, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Command", logger, cmdName, cmdArgs, workingDir, environ)
|
||||||
|
ret0, _ := ret[0].(string)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command indicates an expected call of Command.
|
||||||
|
func (mr *MockInterfaceMockRecorder) Command(logger, cmdName, cmdArgs, workingDir, environ interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Command", reflect.TypeOf((*MockInterface)(nil).Command), logger, cmdName, cmdArgs, workingDir, environ)
|
||||||
|
}
|
@ -0,0 +1,35 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
1
|
||||||
|
],
|
||||||
|
"svn_revision": "5022",
|
||||||
|
"platform_info": "x86_64-linux-5.4.0-45-generic",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"-j",
|
||||||
|
"--scan"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"devices": [
|
||||||
|
{
|
||||||
|
"name": "/dev/bus/0",
|
||||||
|
"info_name": "/dev/bus/0 [megaraid_disk_00]",
|
||||||
|
"type": "megaraid,0",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/bus/0",
|
||||||
|
"info_name": "/dev/bus/0 [megaraid_disk_01]",
|
||||||
|
"type": "megaraid,1",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
@ -0,0 +1,29 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"svn_revision": "4883",
|
||||||
|
"platform_info": "x86_64-linux-4.19.107-Unraid",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"-j",
|
||||||
|
"--scan"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"devices": [
|
||||||
|
{
|
||||||
|
"name": "/dev/nvme0",
|
||||||
|
"info_name": "/dev/nvme0",
|
||||||
|
"type": "nvme",
|
||||||
|
"protocol": "NVMe"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
@ -0,0 +1,65 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"svn_revision": "4883",
|
||||||
|
"platform_info": "x86_64-linux-5.15.32-flatcar",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"--scan",
|
||||||
|
"-j"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"devices": [
|
||||||
|
{
|
||||||
|
"name": "/dev/sda",
|
||||||
|
"info_name": "/dev/sda",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/sdb",
|
||||||
|
"info_name": "/dev/sdb",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/sdc",
|
||||||
|
"info_name": "/dev/sdc",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/sdd",
|
||||||
|
"info_name": "/dev/sdd",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/sde",
|
||||||
|
"info_name": "/dev/sde",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/sdf",
|
||||||
|
"info_name": "/dev/sdf",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/sdg",
|
||||||
|
"info_name": "/dev/sdg",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
@ -0,0 +1 @@
|
|||||||
|
`rootfs` is only used by Dockerfile and Dockerfile.collector
|
@ -0,0 +1,43 @@
|
|||||||
|
# Downsampling
|
||||||
|
|
||||||
|
Scrutiny collects alot of data, that can cause the database to grow unbounded.
|
||||||
|
|
||||||
|
- Smart data
|
||||||
|
- Smart test data
|
||||||
|
- Temperature data
|
||||||
|
- Disk metrics (capacity/usage)
|
||||||
|
- etc
|
||||||
|
|
||||||
|
This data must be accurate in the short term, and is useful for doing trend analysis in the long term.
|
||||||
|
However, for trend analysis we only need aggregate data, individual data points are not as useful.
|
||||||
|
|
||||||
|
Scrutiny will automatically downsample data on a schedule to ensure that the database size stays reasonable, while still
|
||||||
|
ensuring historical data is present for comparisons.
|
||||||
|
|
||||||
|
|
||||||
|
| Bucket Name | Retention Period | Downsampling Range | Downsampling Aggregation Window | Downsampling Cron | Comments |
|
||||||
|
| --- | --- | --- | --- | --- | --- |
|
||||||
|
| `metrics` | 15 days | `-2w -1w` | `1w` | main bucket, weekly on Sunday at 1:00am |
|
||||||
|
| `metrics_weekly` | 9 weeks | `-2mo -1mo` | `1mo` | monthly on first day of the month at 1:30am
|
||||||
|
| `metrics_monthly` | 25 months | `-2y -1y` | `1y` | yearly on the first day of the year at 2:00am
|
||||||
|
| `metrics_yearly` | forever | - | - | - | |
|
||||||
|
|
||||||
|
|
||||||
|
After 5 months, here's how may data points should exist in each bucket for one disk
|
||||||
|
|
||||||
|
| Bucket Name | Datapoints | Comments |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| `metrics` | 15 | 7 daily datapoints , up to 7 pending data, 1 buffer data point |
|
||||||
|
| `metrics_weekly` | 9 | 4 aggregated weekly data points, 4 pending datapoints, 1 buffer data point |
|
||||||
|
| `metrics_monthly` | 3 | 3 aggregated monthly data points |
|
||||||
|
| `metrics_yearly` | 0 | |
|
||||||
|
|
||||||
|
After 5 years, here's how may data points should exist in each bucket for one disk
|
||||||
|
|
||||||
|
| Bucket Name | Datapoints | Comments |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| `metrics` | - | - |
|
||||||
|
| `metrics_weekly` | - |
|
||||||
|
| `metrics_monthly` | - |
|
||||||
|
| `metrics_yearly` | - |
|
||||||
|
|
@ -0,0 +1,127 @@
|
|||||||
|
# Scrutiny <-> SmartMonTools
|
||||||
|
|
||||||
|
Scrutiny uses `smartctl --scan` to detect devices/drives. If your devices are not being detected by Scrutiny, or some
|
||||||
|
data is missing, this is probably due to a `smartctl` issue.
|
||||||
|
The following page will document commonly asked questions and troubleshooting steps for the Scrutiny S.M.A.R.T. data collector.
|
||||||
|
|
||||||
|
## WWN vs Device name
|
||||||
|
As discussed in [`#117`](https://github.com/AnalogJ/scrutiny/issues/117), `/dev/sd*` device paths are ephemeral.
|
||||||
|
|
||||||
|
> Device paths in Linux aren't guaranteed to be consistent across restarts. Device names consist of major numbers (letters) and minor numbers. When the Linux storage device driver detects a new device, the driver assigns major and minor numbers from the available range to the device. When a device is removed, the device numbers are freed for reuse.
|
||||||
|
>
|
||||||
|
> The problem occurs because device scanning in Linux is scheduled by the SCSI subsystem to happen asynchronously. As a result, a device path name can vary across restarts.
|
||||||
|
>
|
||||||
|
> https://docs.microsoft.com/en-us/troubleshoot/azure/virtual-machines/troubleshoot-device-names-problems
|
||||||
|
|
||||||
|
While the Docker Scrutiny collector does require devices to attached to the docker container by device name (using `--device=/dev/sd..`), internally
|
||||||
|
Scrutiny stores and references the devices by their `WWN` which is globally unique, and never changes.
|
||||||
|
|
||||||
|
As such, passing devices to the Scrutiny collector container using `/dev/disk/by-id/`, `/dev/disk/by-label/`, `/dev/disk/by-path/` and `/dev/disk/by-uuid/`
|
||||||
|
paths are unnecessary, unless you'd like to ensure the docker run command never needs to change.
|
||||||
|
|
||||||
|
|
||||||
|
## Device Detection By Smartctl
|
||||||
|
|
||||||
|
The first thing you'll want to do is run `smartctl` locally (not in Docker) and make sure the output shows all your drives as expected.
|
||||||
|
See the `Drive Types` section below for what this output should look like for `NVMe`/`ATA`/`RAID` drives.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
smartctl --scan
|
||||||
|
|
||||||
|
/dev/sda -d scsi # /dev/sda, SCSI device
|
||||||
|
/dev/sdb -d scsi # /dev/sdb, SCSI device
|
||||||
|
/dev/sdc -d scsi # /dev/sdc, SCSI device
|
||||||
|
/dev/sdd -d scsi # /dev/sdd, SCSI device
|
||||||
|
```
|
||||||
|
|
||||||
|
Once you've verified that `smartctl` correctly detects your drives, make sure scrutiny is correctly detecting them as well.
|
||||||
|
> NOTE: make sure you specify all the devices you'd like scrutiny to process using `--device=` flags.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -it --rm \
|
||||||
|
-v /run/udev:/run/udev:ro \
|
||||||
|
--cap-add SYS_RAWIO \
|
||||||
|
--device=/dev/sda \
|
||||||
|
--device=/dev/sdb \
|
||||||
|
ghcr.io/analogj/scrutiny:master-collector smartctl --scan
|
||||||
|
```
|
||||||
|
|
||||||
|
If the output is the same, your devices will be processed by Scrutiny.
|
||||||
|
|
||||||
|
### Collector Config File
|
||||||
|
In some cases `--scan` does not correctly detect the device type, returning [incomplete SMART data](https://github.com/AnalogJ/scrutiny/issues/45).
|
||||||
|
Scrutiny will supports overriding the detected device type via the config file.
|
||||||
|
|
||||||
|
### RAID Controllers (Megaraid/3ware/HBA/Adaptec/HPE/etc)
|
||||||
|
Smartctl has support for a large number of [RAID controllers](https://www.smartmontools.org/wiki/Supported_RAID-Controllers), however this
|
||||||
|
support is not automatic, and may require some additional device type hinting. You can provide this information to the Scrutiny collector
|
||||||
|
using a collector config file. See [example.collector.yaml](/example.collector.yaml)
|
||||||
|
|
||||||
|
> NOTE: If you use docker, you **must** pass though the RAID virtual disk to the container using `--device` (see below)
|
||||||
|
>
|
||||||
|
> This device may be in `/dev/*` or `/dev/bus/*`.
|
||||||
|
>
|
||||||
|
> If you're unsure, run `smartctl --scan` on your host, and pass all listed devices to the container.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# /scrutiny/config/collector.yaml
|
||||||
|
devices:
|
||||||
|
# Dell PERC/Broadcom Megaraid example: https://github.com/AnalogJ/scrutiny/issues/30
|
||||||
|
- device: /dev/bus/0
|
||||||
|
type:
|
||||||
|
- megaraid,14
|
||||||
|
- megaraid,15
|
||||||
|
- megaraid,18
|
||||||
|
- megaraid,19
|
||||||
|
- megaraid,20
|
||||||
|
- megaraid,21
|
||||||
|
|
||||||
|
- device: /dev/twa0
|
||||||
|
type:
|
||||||
|
- 3ware,0
|
||||||
|
- 3ware,1
|
||||||
|
- 3ware,2
|
||||||
|
- 3ware,3
|
||||||
|
- 3ware,4
|
||||||
|
- 3ware,5
|
||||||
|
|
||||||
|
# Adapec RAID: https://github.com/AnalogJ/scrutiny/issues/189
|
||||||
|
- device: /dev/sdb
|
||||||
|
type:
|
||||||
|
- aacraid,0,0,0
|
||||||
|
- aacraid,0,0,1
|
||||||
|
|
||||||
|
# HPE Smart Array example: https://github.com/AnalogJ/scrutiny/issues/213
|
||||||
|
- device: /dev/sda
|
||||||
|
type:
|
||||||
|
- 'cciss,0'
|
||||||
|
- 'cciss,1'
|
||||||
|
```
|
||||||
|
|
||||||
|
### NVMe Drives
|
||||||
|
As mentioned in the [README.md](/README.md), NVMe devices require both `--cap-add SYS_RAWIO` and `--cap-add SYS_ADMIN`
|
||||||
|
to allow smartctl permission to query your NVMe device SMART data [#26](https://github.com/AnalogJ/scrutiny/issues/26)
|
||||||
|
|
||||||
|
When attaching NVMe devices using `--device=/dev/nvme..`, make sure to provide the device controller (`/dev/nvme0`)
|
||||||
|
instead of the block device (`/dev/nvme0n1`). See [#209](https://github.com/AnalogJ/scrutiny/issues/209).
|
||||||
|
|
||||||
|
> The character device /dev/nvme0 is the NVME device controller, and block devices like /dev/nvme0n1 are the NVME storage namespaces: the devices you use for actual storage, which will behave essentially as disks.
|
||||||
|
>
|
||||||
|
> In enterprise-grade hardware, there might be support for several namespaces, thin provisioning within namespaces and other features. For now, you could think namespaces as sort of meta-partitions with extra features for enterprise use.
|
||||||
|
|
||||||
|
### ATA
|
||||||
|
|
||||||
|
### Standby/Sleeping Disks
|
||||||
|
- https://github.com/AnalogJ/scrutiny/issues/221
|
||||||
|
- https://github.com/AnalogJ/scrutiny/issues/157
|
||||||
|
|
||||||
|
### Volume Mount All Devices (`/dev`) - Privileged
|
||||||
|
|
||||||
|
|
||||||
|
## Hub & Spoke model, with multiple Hosts.
|
||||||
|
|
||||||
|
When deploying Scrutiny in a hub & spoke model, it can be difficult to determine exactly which node a set of devices are associated with.
|
||||||
|
Thankfully the collector has a special `--host-id` flag (or `COLLECTOR_HOST_ID` env variable) that can be used to associate devices with a friendly host name.
|
||||||
|
|
||||||
|
See the [docs/INSTALL_HUB_SPOKE.md](/docs/INSTALL_HUB_SPOKE.md) guide for more information.
|
||||||
|
|
@ -0,0 +1,62 @@
|
|||||||
|
|
||||||
|
// SQLite Table(s)
|
||||||
|
Table device {
|
||||||
|
created_at timestamp
|
||||||
|
|
||||||
|
wwn varchar [pk]
|
||||||
|
|
||||||
|
//user provided
|
||||||
|
label varchar
|
||||||
|
host_id varchar
|
||||||
|
|
||||||
|
// smartctl provided
|
||||||
|
device_name varchar
|
||||||
|
manufacturer varchar
|
||||||
|
model_name varchar
|
||||||
|
interface_type varchar
|
||||||
|
interface_speed varchar
|
||||||
|
serial_number varchar
|
||||||
|
firmware varchar
|
||||||
|
rotational_speed varchar
|
||||||
|
capacity varchar
|
||||||
|
form_factor varchar
|
||||||
|
smart_support varchar
|
||||||
|
device_protocol varchar
|
||||||
|
device_type varchar
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// InfluxDB Tables
|
||||||
|
Table device_temperature {
|
||||||
|
//timestamp
|
||||||
|
created_at timestamp
|
||||||
|
|
||||||
|
//tags (indexed & queryable)
|
||||||
|
device_wwn varchar [pk]
|
||||||
|
|
||||||
|
//fields
|
||||||
|
temp bigint
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Table smart_ata_results {
|
||||||
|
//timestamp
|
||||||
|
created_at timestamp
|
||||||
|
|
||||||
|
//tags (indexed & queryable)
|
||||||
|
device_wwn varchar [pk]
|
||||||
|
smart_status varchar
|
||||||
|
scrutiny_status varchar
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
//fields
|
||||||
|
temp bigint
|
||||||
|
power_on_hours bigint
|
||||||
|
power_cycle_count bigint
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
Ref: device.wwn < smart_ata_results.device_wwn
|
@ -0,0 +1,4 @@
|
|||||||
|
#!/usr/bin/with-contenv bash
|
||||||
|
|
||||||
|
COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}
|
||||||
|
sed -i 's|{COLLECTOR_CRON_SCHEDULE}|'"${COLLECTOR_CRON_SCHEDULE}"'|g' /etc/cron.d/scrutiny
|
@ -0,0 +1,17 @@
|
|||||||
|
#!/usr/bin/with-contenv bash
|
||||||
|
|
||||||
|
mkdir -p /scrutiny/influxdb/
|
||||||
|
|
||||||
|
if [ -f "/scrutiny/influxdb/config.yaml" ]; then
|
||||||
|
echo "influxdb config file already exists. skipping."
|
||||||
|
else
|
||||||
|
cat << 'EOF' > /scrutiny/influxdb/config.yaml
|
||||||
|
bolt-path: /scrutiny/influxdb/influxd.bolt
|
||||||
|
engine-path: /scrutiny/influxdb/engine
|
||||||
|
http-bind-address: ":8086"
|
||||||
|
reporting-disabled: true
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "starting influxdb"
|
||||||
|
influxd run
|
@ -1,5 +1,7 @@
|
|||||||
#!/usr/bin/with-contenv bash
|
#!/usr/bin/with-contenv bash
|
||||||
|
|
||||||
echo "starting scrutiny"
|
echo "waiting for influxdb"
|
||||||
|
until $(curl --output /dev/null --silent --head --fail http://localhost:8086/health); do echo "influxdb not ready" && sleep 5; done
|
||||||
|
|
||||||
|
echo "starting scrutiny"
|
||||||
scrutiny start
|
scrutiny start
|
||||||
|
@ -0,0 +1,28 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
const DeviceProtocolAta = "ATA"
|
||||||
|
const DeviceProtocolScsi = "SCSI"
|
||||||
|
const DeviceProtocolNvme = "NVMe"
|
||||||
|
|
||||||
|
const SmartAttributeStatusPassed = 0
|
||||||
|
const SmartAttributeStatusFailed = 1
|
||||||
|
const SmartAttributeStatusWarning = 2
|
||||||
|
|
||||||
|
const SmartWhenFailedFailingNow = "FAILING_NOW"
|
||||||
|
const SmartWhenFailedInThePast = "IN_THE_PAST"
|
||||||
|
|
||||||
|
//const SmartStatusPassed = "passed"
|
||||||
|
//const SmartStatusFailed = "failed"
|
||||||
|
|
||||||
|
type DeviceStatus int
|
||||||
|
|
||||||
|
const (
|
||||||
|
DeviceStatusPassed DeviceStatus = 0
|
||||||
|
DeviceStatusFailedSmart DeviceStatus = iota
|
||||||
|
DeviceStatusFailedScrutiny DeviceStatus = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
func Set(b, flag DeviceStatus) DeviceStatus { return b | flag }
|
||||||
|
func Clear(b, flag DeviceStatus) DeviceStatus { return b &^ flag }
|
||||||
|
func Toggle(b, flag DeviceStatus) DeviceStatus { return b ^ flag }
|
||||||
|
func Has(b, flag DeviceStatus) bool { return b&flag != 0 }
|
@ -0,0 +1,30 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DeviceRepo interface {
|
||||||
|
Close() error
|
||||||
|
|
||||||
|
//GetSettings()
|
||||||
|
//SaveSetting()
|
||||||
|
|
||||||
|
RegisterDevice(ctx context.Context, dev models.Device) error
|
||||||
|
GetDevices(ctx context.Context) ([]models.Device, error)
|
||||||
|
UpdateDevice(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (models.Device, error)
|
||||||
|
UpdateDeviceStatus(ctx context.Context, wwn string, status pkg.DeviceStatus) (models.Device, error)
|
||||||
|
GetDeviceDetails(ctx context.Context, wwn string) (models.Device, error)
|
||||||
|
|
||||||
|
SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error)
|
||||||
|
GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, attributes []string) ([]measurements.Smart, error)
|
||||||
|
|
||||||
|
SaveSmartTemperature(ctx context.Context, wwn string, deviceProtocol string, collectorSmartData collector.SmartInfo) error
|
||||||
|
|
||||||
|
GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error)
|
||||||
|
GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error)
|
||||||
|
}
|
@ -0,0 +1,47 @@
|
|||||||
|
package m20201107210306
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Deprecated: m20201107210306.Device is deprecated, only used by db migrations
|
||||||
|
type Device struct {
|
||||||
|
//GORM attributes, see: http://gorm.io/docs/conventions.html
|
||||||
|
CreatedAt time.Time
|
||||||
|
UpdatedAt time.Time
|
||||||
|
DeletedAt *time.Time
|
||||||
|
|
||||||
|
WWN string `json:"wwn" gorm:"primary_key"`
|
||||||
|
HostId string `json:"host_id"`
|
||||||
|
|
||||||
|
DeviceName string `json:"device_name"`
|
||||||
|
Manufacturer string `json:"manufacturer"`
|
||||||
|
ModelName string `json:"model_name"`
|
||||||
|
InterfaceType string `json:"interface_type"`
|
||||||
|
InterfaceSpeed string `json:"interface_speed"`
|
||||||
|
SerialNumber string `json:"serial_number"`
|
||||||
|
Firmware string `json:"firmware"`
|
||||||
|
RotationSpeed int `json:"rotational_speed"`
|
||||||
|
Capacity int64 `json:"capacity"`
|
||||||
|
FormFactor string `json:"form_factor"`
|
||||||
|
SmartSupport bool `json:"smart_support"`
|
||||||
|
DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
|
||||||
|
DeviceType string `json:"device_type"` //device type is used for querying with -d/t flag, should only be used by collector.
|
||||||
|
SmartResults []Smart `gorm:"foreignkey:DeviceWWN" json:"smart_results"`
|
||||||
|
}
|
||||||
|
|
||||||
|
const DeviceProtocolAta = "ATA"
|
||||||
|
const DeviceProtocolScsi = "SCSI"
|
||||||
|
const DeviceProtocolNvme = "NVMe"
|
||||||
|
|
||||||
|
func (dv *Device) IsAta() bool {
|
||||||
|
return dv.DeviceProtocol == DeviceProtocolAta
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dv *Device) IsScsi() bool {
|
||||||
|
return dv.DeviceProtocol == DeviceProtocolScsi
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dv *Device) IsNvme() bool {
|
||||||
|
return dv.DeviceProtocol == DeviceProtocolNvme
|
||||||
|
}
|
@ -0,0 +1,26 @@
|
|||||||
|
package m20201107210306
|
||||||
|
|
||||||
|
import (
|
||||||
|
"gorm.io/gorm"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Deprecated: m20201107210306.Smart is deprecated, only used by db migrations
|
||||||
|
type Smart struct {
|
||||||
|
gorm.Model
|
||||||
|
|
||||||
|
DeviceWWN string `json:"device_wwn"`
|
||||||
|
Device Device `json:"-" gorm:"foreignkey:DeviceWWN"` // use DeviceWWN as foreign key
|
||||||
|
|
||||||
|
TestDate time.Time `json:"date"`
|
||||||
|
SmartStatus string `json:"smart_status"` // SmartStatusPassed or SmartStatusFailed
|
||||||
|
|
||||||
|
//Metrics
|
||||||
|
Temp int64 `json:"temp"`
|
||||||
|
PowerOnHours int64 `json:"power_on_hours"`
|
||||||
|
PowerCycleCount int64 `json:"power_cycle_count"`
|
||||||
|
|
||||||
|
AtaAttributes []SmartAtaAttribute `json:"ata_attributes" gorm:"foreignkey:SmartId"`
|
||||||
|
NvmeAttributes []SmartNvmeAttribute `json:"nvme_attributes" gorm:"foreignkey:SmartId"`
|
||||||
|
ScsiAttributes []SmartScsiAttribute `json:"scsi_attributes" gorm:"foreignkey:SmartId"`
|
||||||
|
}
|
@ -0,0 +1,26 @@
|
|||||||
|
package m20201107210306
|
||||||
|
|
||||||
|
import "gorm.io/gorm"
|
||||||
|
|
||||||
|
// Deprecated: m20201107210306.SmartAtaAttribute is deprecated, only used by db migrations
|
||||||
|
type SmartAtaAttribute struct {
|
||||||
|
gorm.Model
|
||||||
|
|
||||||
|
SmartId int `json:"smart_id"`
|
||||||
|
Smart Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key
|
||||||
|
|
||||||
|
AttributeId int `json:"attribute_id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Value int `json:"value"`
|
||||||
|
Worst int `json:"worst"`
|
||||||
|
Threshold int `json:"thresh"`
|
||||||
|
RawValue int64 `json:"raw_value"`
|
||||||
|
RawString string `json:"raw_string"`
|
||||||
|
WhenFailed string `json:"when_failed"`
|
||||||
|
|
||||||
|
TransformedValue int64 `json:"transformed_value"`
|
||||||
|
Status string `gorm:"-" json:"status,omitempty"`
|
||||||
|
StatusReason string `gorm:"-" json:"status_reason,omitempty"`
|
||||||
|
FailureRate float64 `gorm:"-" json:"failure_rate,omitempty"`
|
||||||
|
History []SmartAtaAttribute `gorm:"-" json:"history,omitempty"`
|
||||||
|
}
|
@ -0,0 +1,22 @@
|
|||||||
|
package m20201107210306
|
||||||
|
|
||||||
|
import "gorm.io/gorm"
|
||||||
|
|
||||||
|
// Deprecated: m20201107210306.SmartNvmeAttribute is deprecated, only used by db migrations
|
||||||
|
type SmartNvmeAttribute struct {
|
||||||
|
gorm.Model
|
||||||
|
|
||||||
|
SmartId int `json:"smart_id"`
|
||||||
|
Smart Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key
|
||||||
|
|
||||||
|
AttributeId string `json:"attribute_id"` //json string from smartctl
|
||||||
|
Name string `json:"name"`
|
||||||
|
Value int `json:"value"`
|
||||||
|
Threshold int `json:"thresh"`
|
||||||
|
|
||||||
|
TransformedValue int64 `json:"transformed_value"`
|
||||||
|
Status string `gorm:"-" json:"status,omitempty"`
|
||||||
|
StatusReason string `gorm:"-" json:"status_reason,omitempty"`
|
||||||
|
FailureRate float64 `gorm:"-" json:"failure_rate,omitempty"`
|
||||||
|
History []SmartNvmeAttribute `gorm:"-" json:"history,omitempty"`
|
||||||
|
}
|
@ -0,0 +1,22 @@
|
|||||||
|
package m20201107210306
|
||||||
|
|
||||||
|
import "gorm.io/gorm"
|
||||||
|
|
||||||
|
// Deprecated: m20201107210306.SmartScsiAttribute is deprecated, only used by db migrations
|
||||||
|
type SmartScsiAttribute struct {
|
||||||
|
gorm.Model
|
||||||
|
|
||||||
|
SmartId int `json:"smart_id"`
|
||||||
|
Smart Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key
|
||||||
|
|
||||||
|
AttributeId string `json:"attribute_id"` //json string from smartctl
|
||||||
|
Name string `json:"name"`
|
||||||
|
Value int `json:"value"`
|
||||||
|
Threshold int `json:"thresh"`
|
||||||
|
|
||||||
|
TransformedValue int64 `json:"transformed_value"`
|
||||||
|
Status string `gorm:"-" json:"status,omitempty"`
|
||||||
|
StatusReason string `gorm:"-" json:"status_reason,omitempty"`
|
||||||
|
FailureRate float64 `gorm:"-" json:"failure_rate,omitempty"`
|
||||||
|
History []SmartScsiAttribute `gorm:"-" json:"history,omitempty"`
|
||||||
|
}
|
@ -0,0 +1,407 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
|
"github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
|
"github.com/influxdata/influxdb-client-go/v2/domain"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"gorm.io/driver/sqlite"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// 60seconds * 60minutes * 24hours * 15 days
|
||||||
|
RETENTION_PERIOD_15_DAYS_IN_SECONDS = 1_296_000
|
||||||
|
|
||||||
|
// 60seconds * 60minutes * 24hours * 7 days * 9 weeks
|
||||||
|
RETENTION_PERIOD_9_WEEKS_IN_SECONDS = 5_443_200
|
||||||
|
|
||||||
|
// 60seconds * 60minutes * 24hours * 7 days * (52 + 52 + 4)weeks
|
||||||
|
RETENTION_PERIOD_25_MONTHS_IN_SECONDS = 65_318_400
|
||||||
|
|
||||||
|
DURATION_KEY_WEEK = "week"
|
||||||
|
DURATION_KEY_MONTH = "month"
|
||||||
|
DURATION_KEY_YEAR = "year"
|
||||||
|
DURATION_KEY_FOREVER = "forever"
|
||||||
|
)
|
||||||
|
|
||||||
|
//// GormLogger is a custom logger for Gorm, making it use logrus.
|
||||||
|
//type GormLogger struct{ Logger logrus.FieldLogger }
|
||||||
|
//
|
||||||
|
//// Print handles log events from Gorm for the custom logger.
|
||||||
|
//func (gl *GormLogger) Print(v ...interface{}) {
|
||||||
|
// switch v[0] {
|
||||||
|
// case "sql":
|
||||||
|
// gl.Logger.WithFields(
|
||||||
|
// logrus.Fields{
|
||||||
|
// "module": "gorm",
|
||||||
|
// "type": "sql",
|
||||||
|
// "rows": v[5],
|
||||||
|
// "src_ref": v[1],
|
||||||
|
// "values": v[4],
|
||||||
|
// },
|
||||||
|
// ).Debug(v[3])
|
||||||
|
// case "log":
|
||||||
|
// gl.Logger.WithFields(logrus.Fields{"module": "gorm", "type": "log"}).Print(v[2])
|
||||||
|
// }
|
||||||
|
//}
|
||||||
|
|
||||||
|
func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.FieldLogger) (DeviceRepo, error) {
|
||||||
|
backgroundContext := context.Background()
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Gorm/SQLite setup
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
globalLogger.Infof("Trying to connect to scrutiny sqlite db: %s\n", appConfig.GetString("web.database.location"))
|
||||||
|
database, err := gorm.Open(sqlite.Open(appConfig.GetString("web.database.location")), &gorm.Config{
|
||||||
|
//TODO: figure out how to log database queries again.
|
||||||
|
//Logger: logger
|
||||||
|
DisableForeignKeyConstraintWhenMigrating: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to connect to database! - %v", err)
|
||||||
|
}
|
||||||
|
globalLogger.Infof("Successfully connected to scrutiny sqlite db: %s\n", appConfig.GetString("web.database.location"))
|
||||||
|
|
||||||
|
//database.SetLogger()
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// InfluxDB setup
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// Create a new client using an InfluxDB server base URL and an authentication token
|
||||||
|
influxdbUrl := fmt.Sprintf("http://%s:%s", appConfig.GetString("web.influxdb.host"), appConfig.GetString("web.influxdb.port"))
|
||||||
|
globalLogger.Debugf("InfluxDB url: %s", influxdbUrl)
|
||||||
|
|
||||||
|
client := influxdb2.NewClient(influxdbUrl, appConfig.GetString("web.influxdb.token"))
|
||||||
|
|
||||||
|
if !appConfig.IsSet("web.influxdb.token") {
|
||||||
|
globalLogger.Debugf("No influxdb token found, running first-time setup...")
|
||||||
|
|
||||||
|
// if no token is provided, but we have a valid server, we're going to assume this is the first setup of our server.
|
||||||
|
// we will initialize with a predetermined username & password, that you should change.
|
||||||
|
|
||||||
|
// metrics bucket will have a retention period of 8 days (since it will be down-sampled once a week)
|
||||||
|
// in seconds (60seconds * 60minutes * 24hours * 15 days) = 1_296_000 (see EnsureBucket() function)
|
||||||
|
onboardingResponse, err := client.Setup(
|
||||||
|
backgroundContext,
|
||||||
|
appConfig.GetString("web.influxdb.init_username"),
|
||||||
|
appConfig.GetString("web.influxdb.init_password"),
|
||||||
|
appConfig.GetString("web.influxdb.org"),
|
||||||
|
appConfig.GetString("web.influxdb.bucket"),
|
||||||
|
0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
appConfig.Set("web.influxdb.token", *onboardingResponse.Auth.Token)
|
||||||
|
// we should write the config file out here. Ignore failures.
|
||||||
|
err = appConfig.WriteConfig()
|
||||||
|
if err != nil {
|
||||||
|
globalLogger.Infof("ignoring error while writing influxdb info to config: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use blocking write client for writes to desired bucket
|
||||||
|
writeAPI := client.WriteAPIBlocking(appConfig.GetString("web.influxdb.org"), appConfig.GetString("web.influxdb.bucket"))
|
||||||
|
|
||||||
|
// Get query client
|
||||||
|
queryAPI := client.QueryAPI(appConfig.GetString("web.influxdb.org"))
|
||||||
|
|
||||||
|
// Get task client
|
||||||
|
taskAPI := client.TasksAPI()
|
||||||
|
|
||||||
|
if writeAPI == nil || queryAPI == nil || taskAPI == nil {
|
||||||
|
return nil, fmt.Errorf("Failed to connect to influxdb!")
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceRepo := scrutinyRepository{
|
||||||
|
appConfig: appConfig,
|
||||||
|
logger: globalLogger,
|
||||||
|
influxClient: client,
|
||||||
|
influxWriteApi: writeAPI,
|
||||||
|
influxQueryApi: queryAPI,
|
||||||
|
influxTaskApi: taskAPI,
|
||||||
|
gormClient: database,
|
||||||
|
}
|
||||||
|
|
||||||
|
orgInfo, err := client.OrganizationsAPI().FindOrganizationByName(backgroundContext, appConfig.GetString("web.influxdb.org"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize Buckets (if necessary)
|
||||||
|
err = deviceRepo.EnsureBuckets(backgroundContext, orgInfo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize Background Tasks
|
||||||
|
err = deviceRepo.EnsureTasks(backgroundContext, *orgInfo.Id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// InfluxDB & SQLite migrations
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
//database.AutoMigrate(&models.Device{})
|
||||||
|
err = deviceRepo.Migrate(backgroundContext)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &deviceRepo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// scrutinyRepository is the concrete repository implementation backed by two
// datastores: InfluxDB for time-series SMART metrics and a GORM-managed
// relational database for device metadata.
type scrutinyRepository struct {
	appConfig config.Interface   // application configuration (influxdb org/bucket/token settings, etc.)
	logger    logrus.FieldLogger // structured logger shared by all repository methods

	influxWriteApi api.WriteAPIBlocking // blocking write client scoped to the main bucket
	influxQueryApi api.QueryAPI         // flux query client
	influxTaskApi  api.TasksAPI         // task client (used for down-sampling tasks)
	influxClient   influxdb2.Client     // root client; used to derive per-bucket write APIs

	gormClient *gorm.DB // relational client for device metadata
}
|
||||||
|
|
||||||
|
// Close releases the InfluxDB client's underlying resources. The GORM database
// handle is not closed here. It always returns nil; the error return exists
// only to satisfy io.Closer-style interfaces.
func (sr *scrutinyRepository) Close() error {
	sr.influxClient.Close()
	return nil
}
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Organization) error {
|
||||||
|
|
||||||
|
var mainBucketRetentionRule domain.RetentionRule
|
||||||
|
var weeklyBucketRetentionRule domain.RetentionRule
|
||||||
|
var monthlyBucketRetentionRule domain.RetentionRule
|
||||||
|
if sr.appConfig.GetBool("web.influxdb.retention_policy") {
|
||||||
|
|
||||||
|
// in tests, we may not want to set a retention policy. If "false", we can set data with old timestamps,
|
||||||
|
// then manually run the down sampling scripts. This should be true for production environments.
|
||||||
|
mainBucketRetentionRule = domain.RetentionRule{EverySeconds: RETENTION_PERIOD_15_DAYS_IN_SECONDS}
|
||||||
|
weeklyBucketRetentionRule = domain.RetentionRule{EverySeconds: RETENTION_PERIOD_9_WEEKS_IN_SECONDS}
|
||||||
|
monthlyBucketRetentionRule = domain.RetentionRule{EverySeconds: RETENTION_PERIOD_25_MONTHS_IN_SECONDS}
|
||||||
|
}
|
||||||
|
|
||||||
|
mainBucket := sr.appConfig.GetString("web.influxdb.bucket")
|
||||||
|
if foundMainBucket, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, mainBucket); foundErr != nil {
|
||||||
|
// metrics bucket will have a retention period of 15 days (since it will be down-sampled once a week)
|
||||||
|
_, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, mainBucket, mainBucketRetentionRule)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
|
||||||
|
//correctly set the retention period for the main bucket (cant do it during setup/creation)
|
||||||
|
foundMainBucket.RetentionRules = domain.RetentionRules{mainBucketRetentionRule}
|
||||||
|
sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundMainBucket)
|
||||||
|
}
|
||||||
|
|
||||||
|
//create buckets (used for downsampling)
|
||||||
|
weeklyBucket := fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))
|
||||||
|
if _, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, weeklyBucket); foundErr != nil {
|
||||||
|
// metrics_weekly bucket will have a retention period of 8+1 weeks (since it will be down-sampled once a month)
|
||||||
|
_, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, weeklyBucket, weeklyBucketRetentionRule)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
monthlyBucket := fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))
|
||||||
|
if _, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, monthlyBucket); foundErr != nil {
|
||||||
|
// metrics_monthly bucket will have a retention period of 24+1 months (since it will be down-sampled once a year)
|
||||||
|
_, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, monthlyBucket, monthlyBucketRetentionRule)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
yearlyBucket := fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))
|
||||||
|
if _, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, yearlyBucket); foundErr != nil {
|
||||||
|
// metrics_yearly bucket will have an infinite retention period
|
||||||
|
_, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, yearlyBucket)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// DeviceSummary
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// get a map of all devices and associated SMART data
|
||||||
|
func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error) {
|
||||||
|
devices, err := sr.GetDevices(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
summaries := map[string]*models.DeviceSummary{}
|
||||||
|
|
||||||
|
for _, device := range devices {
|
||||||
|
summaries[device.WWN] = &models.DeviceSummary{Device: device}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get parser flux query result
|
||||||
|
//appConfig.GetString("web.influxdb.bucket")
|
||||||
|
queryStr := fmt.Sprintf(`
|
||||||
|
import "influxdata/influxdb/schema"
|
||||||
|
bucketBaseName = "%s"
|
||||||
|
|
||||||
|
dailyData = from(bucket: bucketBaseName)
|
||||||
|
|> range(start: -10y, stop: now())
|
||||||
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|
|> last()
|
||||||
|
|> schema.fieldsAsCols()
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|
||||||
|
weeklyData = from(bucket: bucketBaseName + "_weekly")
|
||||||
|
|> range(start: -10y, stop: now())
|
||||||
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|
|> last()
|
||||||
|
|> schema.fieldsAsCols()
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|
||||||
|
monthlyData = from(bucket: bucketBaseName + "_monthly")
|
||||||
|
|> range(start: -10y, stop: now())
|
||||||
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|
|> last()
|
||||||
|
|> schema.fieldsAsCols()
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|
||||||
|
yearlyData = from(bucket: bucketBaseName + "_yearly")
|
||||||
|
|> range(start: -10y, stop: now())
|
||||||
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|
|> last()
|
||||||
|
|> schema.fieldsAsCols()
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|
||||||
|
union(tables: [dailyData, weeklyData, monthlyData, yearlyData])
|
||||||
|
|> sort(columns: ["_time"], desc: false)
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|> last(column: "device_wwn")
|
||||||
|
|> yield(name: "last")
|
||||||
|
`,
|
||||||
|
sr.appConfig.GetString("web.influxdb.bucket"),
|
||||||
|
)
|
||||||
|
|
||||||
|
result, err := sr.influxQueryApi.Query(ctx, queryStr)
|
||||||
|
if err == nil {
|
||||||
|
// Use Next() to iterate over query result lines
|
||||||
|
for result.Next() {
|
||||||
|
// Observe when there is new grouping key producing new table
|
||||||
|
if result.TableChanged() {
|
||||||
|
//fmt.Printf("table: %s\n", result.TableMetadata().String())
|
||||||
|
}
|
||||||
|
// read result
|
||||||
|
|
||||||
|
//get summary data from Influxdb.
|
||||||
|
//result.Record().Values()
|
||||||
|
if deviceWWN, ok := result.Record().Values()["device_wwn"]; ok {
|
||||||
|
|
||||||
|
//ensure summaries is intialized for this wwn
|
||||||
|
if _, exists := summaries[deviceWWN.(string)]; !exists {
|
||||||
|
summaries[deviceWWN.(string)] = &models.DeviceSummary{}
|
||||||
|
}
|
||||||
|
|
||||||
|
summaries[deviceWWN.(string)].SmartResults = &models.SmartSummary{
|
||||||
|
Temp: result.Record().Values()["temp"].(int64),
|
||||||
|
PowerOnHours: result.Record().Values()["power_on_hours"].(int64),
|
||||||
|
CollectorDate: result.Record().Values()["_time"].(time.Time),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if result.Err() != nil {
|
||||||
|
fmt.Printf("Query error: %s\n", result.Err().Error())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceTempHistory, err := sr.GetSmartTemperatureHistory(ctx, DURATION_KEY_FOREVER)
|
||||||
|
if err != nil {
|
||||||
|
sr.logger.Printf("========================>>>>>>>>======================")
|
||||||
|
sr.logger.Printf("========================>>>>>>>>======================")
|
||||||
|
sr.logger.Printf("========================>>>>>>>>======================")
|
||||||
|
sr.logger.Printf("========================>>>>>>>>======================")
|
||||||
|
sr.logger.Printf("========================>>>>>>>>======================")
|
||||||
|
sr.logger.Printf("Error: %v", err)
|
||||||
|
}
|
||||||
|
for wwn, tempHistory := range deviceTempHistory {
|
||||||
|
summaries[wwn].TempHistory = tempHistory
|
||||||
|
}
|
||||||
|
|
||||||
|
return summaries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Helper Methods
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) lookupBucketName(durationKey string) string {
|
||||||
|
switch durationKey {
|
||||||
|
case DURATION_KEY_WEEK:
|
||||||
|
//data stored in the last week
|
||||||
|
return sr.appConfig.GetString("web.influxdb.bucket")
|
||||||
|
case DURATION_KEY_MONTH:
|
||||||
|
// data stored in the last month (after the first week)
|
||||||
|
return fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))
|
||||||
|
case DURATION_KEY_YEAR:
|
||||||
|
// data stored in the last year (after the first month)
|
||||||
|
return fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))
|
||||||
|
case DURATION_KEY_FOREVER:
|
||||||
|
//data stored before the last year
|
||||||
|
return fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))
|
||||||
|
}
|
||||||
|
return sr.appConfig.GetString("web.influxdb.bucket")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) lookupDuration(durationKey string) []string {
|
||||||
|
|
||||||
|
switch durationKey {
|
||||||
|
case DURATION_KEY_WEEK:
|
||||||
|
//data stored in the last week
|
||||||
|
return []string{"-1w", "now()"}
|
||||||
|
case DURATION_KEY_MONTH:
|
||||||
|
// data stored in the last month (after the first week)
|
||||||
|
return []string{"-1mo", "-1w"}
|
||||||
|
case DURATION_KEY_YEAR:
|
||||||
|
// data stored in the last year (after the first month)
|
||||||
|
return []string{"-1y", "-1mo"}
|
||||||
|
case DURATION_KEY_FOREVER:
|
||||||
|
//data stored before the last year
|
||||||
|
return []string{"-10y", "-1y"}
|
||||||
|
}
|
||||||
|
return []string{"-1w", "now()"}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []string {
|
||||||
|
switch durationKey {
|
||||||
|
case DURATION_KEY_WEEK:
|
||||||
|
//all data is stored in a single bucket
|
||||||
|
return []string{DURATION_KEY_WEEK}
|
||||||
|
case DURATION_KEY_MONTH:
|
||||||
|
//data is stored in the week bucket and the month bucket
|
||||||
|
return []string{DURATION_KEY_WEEK, DURATION_KEY_MONTH}
|
||||||
|
case DURATION_KEY_YEAR:
|
||||||
|
// data stored in the last year (after the first month)
|
||||||
|
return []string{DURATION_KEY_WEEK, DURATION_KEY_MONTH, DURATION_KEY_YEAR}
|
||||||
|
case DURATION_KEY_FOREVER:
|
||||||
|
//data stored before the last year
|
||||||
|
return []string{DURATION_KEY_WEEK, DURATION_KEY_MONTH, DURATION_KEY_YEAR, DURATION_KEY_FOREVER}
|
||||||
|
}
|
||||||
|
return []string{DURATION_KEY_WEEK}
|
||||||
|
}
|
@ -0,0 +1,74 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"gorm.io/gorm/clause"
|
||||||
|
)
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Device
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
//insert device into DB (and update specified columns if device is already registered)
|
||||||
|
// update device fields that may change: (DeviceType, HostID)
|
||||||
|
func (sr *scrutinyRepository) RegisterDevice(ctx context.Context, dev models.Device) error {
|
||||||
|
if err := sr.gormClient.WithContext(ctx).Clauses(clause.OnConflict{
|
||||||
|
Columns: []clause.Column{{Name: "wwn"}},
|
||||||
|
DoUpdates: clause.AssignmentColumns([]string{"host_id", "device_name", "device_type"}),
|
||||||
|
}).Create(&dev).Error; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// get a list of all devices (only device metadata, no SMART data)
|
||||||
|
func (sr *scrutinyRepository) GetDevices(ctx context.Context) ([]models.Device, error) {
|
||||||
|
//Get a list of all the active devices.
|
||||||
|
devices := []models.Device{}
|
||||||
|
if err := sr.gormClient.WithContext(ctx).Find(&devices).Error; err != nil {
|
||||||
|
return nil, fmt.Errorf("Could not get device summary from DB: %v", err)
|
||||||
|
}
|
||||||
|
return devices, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// update device (only metadata) from collector
|
||||||
|
func (sr *scrutinyRepository) UpdateDevice(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (models.Device, error) {
|
||||||
|
var device models.Device
|
||||||
|
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
|
||||||
|
return device, fmt.Errorf("Could not get device from DB: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO catch GormClient err
|
||||||
|
err := device.UpdateFromCollectorSmartInfo(collectorSmartData)
|
||||||
|
if err != nil {
|
||||||
|
return device, err
|
||||||
|
}
|
||||||
|
return device, sr.gormClient.Model(&device).Updates(device).Error
|
||||||
|
}
|
||||||
|
|
||||||
|
//Update Device Status
|
||||||
|
func (sr *scrutinyRepository) UpdateDeviceStatus(ctx context.Context, wwn string, status pkg.DeviceStatus) (models.Device, error) {
|
||||||
|
var device models.Device
|
||||||
|
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
|
||||||
|
return device, fmt.Errorf("Could not get device from DB: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
device.DeviceStatus = pkg.Set(device.DeviceStatus, status)
|
||||||
|
return device, sr.gormClient.Model(&device).Updates(device).Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) GetDeviceDetails(ctx context.Context, wwn string) (models.Device, error) {
|
||||||
|
var device models.Device
|
||||||
|
|
||||||
|
fmt.Println("GetDeviceDetails from GORM")
|
||||||
|
|
||||||
|
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
|
||||||
|
return models.Device{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return device, nil
|
||||||
|
}
|
@ -0,0 +1,169 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
|
"github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// SMART
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error) {
|
||||||
|
deviceSmartData := measurements.Smart{}
|
||||||
|
err := deviceSmartData.FromCollectorSmartInfo(wwn, collectorSmartData)
|
||||||
|
if err != nil {
|
||||||
|
sr.logger.Errorln("Could not process SMART metrics", err)
|
||||||
|
return measurements.Smart{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
tags, fields := deviceSmartData.Flatten()
|
||||||
|
|
||||||
|
// write point immediately
|
||||||
|
return deviceSmartData, sr.saveDatapoint(sr.influxWriteApi, "smart", tags, fields, deviceSmartData.Date, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, attributes []string) ([]measurements.Smart, error) {
|
||||||
|
// Get SMartResults from InfluxDB
|
||||||
|
|
||||||
|
//TODO: change the filter startrange to a real number.
|
||||||
|
|
||||||
|
// Get parser flux query result
|
||||||
|
//appConfig.GetString("web.influxdb.bucket")
|
||||||
|
queryStr := sr.aggregateSmartAttributesQuery(wwn, durationKey)
|
||||||
|
log.Infoln(queryStr)
|
||||||
|
|
||||||
|
smartResults := []measurements.Smart{}
|
||||||
|
|
||||||
|
result, err := sr.influxQueryApi.Query(ctx, queryStr)
|
||||||
|
if err == nil {
|
||||||
|
// Use Next() to iterate over query result lines
|
||||||
|
for result.Next() {
|
||||||
|
// Observe when there is new grouping key producing new table
|
||||||
|
if result.TableChanged() {
|
||||||
|
//fmt.Printf("table: %s\n", result.TableMetadata().String())
|
||||||
|
}
|
||||||
|
|
||||||
|
smartData, err := measurements.NewSmartFromInfluxDB(result.Record().Values())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
smartResults = append(smartResults, *smartData)
|
||||||
|
|
||||||
|
}
|
||||||
|
if result.Err() != nil {
|
||||||
|
fmt.Printf("Query error: %s\n", result.Err().Error())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return smartResults, nil
|
||||||
|
|
||||||
|
//if err := device.SquashHistory(); err != nil {
|
||||||
|
// logger.Errorln("An error occurred while squashing device history", err)
|
||||||
|
// c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
// return
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//if err := device.ApplyMetadataRules(); err != nil {
|
||||||
|
// logger.Errorln("An error occurred while applying scrutiny thresholds & rules", err)
|
||||||
|
// c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
// return
|
||||||
|
//}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Helper Methods
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) saveDatapoint(influxWriteApi api.WriteAPIBlocking, measurement string, tags map[string]string, fields map[string]interface{}, date time.Time, ctx context.Context) error {
|
||||||
|
//sr.logger.Debugf("Storing datapoint in measurement '%s'. tags: %d fields: %d", measurement, len(tags), len(fields))
|
||||||
|
p := influxdb2.NewPoint(measurement,
|
||||||
|
tags,
|
||||||
|
fields,
|
||||||
|
date)
|
||||||
|
|
||||||
|
// write point immediately
|
||||||
|
return influxWriteApi.WritePoint(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// aggregateSmartAttributesQuery builds the Flux query returning SMART data for
// one device (wwn) across every bucket covering durationKey. Each nested
// duration key contributes one sub-query against its own bucket/time-range;
// multiple sub-queries are combined with union() and sorted by time.
func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, durationKey string) string {

	/*
		Example of the generated query (durationKey = "forever"):

		import "influxdata/influxdb/schema"
		weekData = from(bucket: "metrics")
		|> range(start: -1w, stop: now())
		|> filter(fn: (r) => r["_measurement"] == "smart" )
		|> filter(fn: (r) => r["device_wwn"] == "0x5000c5002df89099" )
		|> schema.fieldsAsCols()

		... monthData ("metrics_weekly", -1mo..-1w), yearData ("metrics_monthly",
		-1y..-1mo) and foreverData ("metrics_yearly", -10y..-1y) follow the same
		shape ...

		union(tables: [weekData, monthData, yearData, foreverData])
		|> sort(columns: ["_time"], desc: false)
		|> yield(name: "last")
	*/

	// the query is assembled as lines and joined with "\n" at the end
	partialQueryStr := []string{
		`import "influxdata/influxdb/schema"`,
	}

	nestedDurationKeys := sr.lookupNestedDurationKeys(durationKey)

	subQueryNames := []string{}
	for _, nestedDurationKey := range nestedDurationKeys {
		// each nested key targets its own bucket and non-overlapping time range
		bucketName := sr.lookupBucketName(nestedDurationKey)
		durationRange := sr.lookupDuration(nestedDurationKey)

		subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
		partialQueryStr = append(partialQueryStr, []string{
			fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
			fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
			`|> filter(fn: (r) => r["_measurement"] == "smart" )`,
			fmt.Sprintf(`|> filter(fn: (r) => r["device_wwn"] == "%s" )`, wwn),
			"|> schema.fieldsAsCols()",
		}...)
	}

	if len(subQueryNames) == 1 {
		//there's only one bucket being queried, no need to union, just aggregate the dataset and return
		partialQueryStr = append(partialQueryStr, []string{
			subQueryNames[0],
			`|> yield()`,
		}...)
	} else {
		// multiple buckets: union the sub-queries and sort chronologically
		partialQueryStr = append(partialQueryStr, []string{
			fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
			`|> sort(columns: ["_time"], desc: false)`,
			`|> yield(name: "last")`,
		}...)
	}

	return strings.Join(partialQueryStr, "\n")
}
|
@ -0,0 +1,451 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20201107210306"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
"github.com/go-gormigrate/gormigrate/v2"
|
||||||
|
_ "github.com/jinzhu/gorm/dialects/sqlite"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// SQLite migrations
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
//database.AutoMigrate(&models.Device{})
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
|
||||||
|
|
||||||
|
sr.logger.Infoln("Database migration starting")
|
||||||
|
|
||||||
|
m := gormigrate.New(sr.gormClient, gormigrate.DefaultOptions, []*gormigrate.Migration{
|
||||||
|
{
|
||||||
|
ID: "20201107210306", // v0.3.13 (pre-influxdb schema). 9fac3c6308dc6cb6cd5bbc43a68cd93e8fb20b87
|
||||||
|
Migrate: func(tx *gorm.DB) error {
|
||||||
|
// it's a good practice to copy the struct inside the function,
|
||||||
|
|
||||||
|
return tx.AutoMigrate(
|
||||||
|
&m20201107210306.Device{},
|
||||||
|
&m20201107210306.Smart{},
|
||||||
|
&m20201107210306.SmartAtaAttribute{},
|
||||||
|
&m20201107210306.SmartNvmeAttribute{},
|
||||||
|
&m20201107210306.SmartNvmeAttribute{},
|
||||||
|
)
|
||||||
|
},
|
||||||
|
Rollback: func(tx *gorm.DB) error {
|
||||||
|
return tx.Migrator().DropTable(
|
||||||
|
&m20201107210306.Device{},
|
||||||
|
&m20201107210306.Smart{},
|
||||||
|
&m20201107210306.SmartAtaAttribute{},
|
||||||
|
&m20201107210306.SmartNvmeAttribute{},
|
||||||
|
&m20201107210306.SmartNvmeAttribute{},
|
||||||
|
"self_tests",
|
||||||
|
)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "20220503113100", // backwards compatible - influxdb schema
|
||||||
|
Migrate: func(tx *gorm.DB) error {
|
||||||
|
// delete unnecessary table.
|
||||||
|
err := tx.Migrator().DropTable("self_tests")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
//add columns to the Device schema, so we can start adding data to the database & influxdb
|
||||||
|
err = tx.Migrator().AddColumn(&models.Device{}, "Label") //Label string `json:"label"`
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = tx.Migrator().AddColumn(&models.Device{}, "DeviceStatus") //DeviceStatus pkg.DeviceStatus `json:"device_status"`
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO: migrate the data from GORM to influxdb.
|
||||||
|
//get a list of all devices:
|
||||||
|
// get a list of all smart scans in the last 2 weeks:
|
||||||
|
// get a list of associated smart attribute data:
|
||||||
|
// translate to a measurements.Smart{} object
|
||||||
|
// call CUSTOM INFLUXDB SAVE FUNCTION (taking bucket as parameter)
|
||||||
|
// get a list of all smart scans in the last 9 weeks:
|
||||||
|
// do same as above (select 1 scan per week)
|
||||||
|
// get a list of all smart scans in the last 25 months:
|
||||||
|
// do same as above (select 1 scan per month)
|
||||||
|
// get a list of all smart scans:
|
||||||
|
// do same as above (select 1 scan per year)
|
||||||
|
|
||||||
|
preDevices := []m20201107210306.Device{} //pre-migration device information
|
||||||
|
if err = tx.Preload("SmartResults", func(db *gorm.DB) *gorm.DB {
|
||||||
|
return db.Order("smarts.created_at ASC") //OLD: .Limit(devicesCount)
|
||||||
|
}).Find(&preDevices).Error; err != nil {
|
||||||
|
sr.logger.Errorln("Could not get device summary from DB", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
//calculate bucket oldest dates
|
||||||
|
today := time.Now()
|
||||||
|
dailyBucketMax := today.Add(-RETENTION_PERIOD_15_DAYS_IN_SECONDS * time.Second) //15 days
|
||||||
|
weeklyBucketMax := today.Add(-RETENTION_PERIOD_9_WEEKS_IN_SECONDS * time.Second) //9 weeks
|
||||||
|
monthlyBucketMax := today.Add(-RETENTION_PERIOD_25_MONTHS_IN_SECONDS * time.Second) //25 weeks
|
||||||
|
|
||||||
|
for _, preDevice := range preDevices {
|
||||||
|
sr.logger.Debugf("====================================")
|
||||||
|
sr.logger.Infof("begin processing device: %s", preDevice.WWN)
|
||||||
|
|
||||||
|
//weekly, monthly, yearly lookup storage, so we don't add more data to the buckets than necessary.
|
||||||
|
weeklyLookup := map[string]bool{}
|
||||||
|
monthlyLookup := map[string]bool{}
|
||||||
|
yearlyLookup := map[string]bool{}
|
||||||
|
for _, preSmartResult := range preDevice.SmartResults { //pre-migration smart results
|
||||||
|
|
||||||
|
//we're looping in ASC mode, so from oldest entry to most current.
|
||||||
|
|
||||||
|
err, postSmartResults := m20201107210306_FromPreInfluxDBSmartResultsCreatePostInfluxDBSmartResults(tx, preDevice, preSmartResult)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
smartTags, smartFields := postSmartResults.Flatten()
|
||||||
|
|
||||||
|
err, postSmartTemp := m20201107210306_FromPreInfluxDBTempCreatePostInfluxDBTemp(preDevice, preSmartResult)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tempTags, tempFields := postSmartTemp.Flatten()
|
||||||
|
tempTags["device_wwn"] = preDevice.WWN
|
||||||
|
|
||||||
|
year, week := postSmartResults.Date.ISOWeek()
|
||||||
|
month := postSmartResults.Date.Month()
|
||||||
|
|
||||||
|
yearStr := strconv.Itoa(year)
|
||||||
|
yearMonthStr := fmt.Sprintf("%d-%d", year, month)
|
||||||
|
yearWeekStr := fmt.Sprintf("%d-%d", year, week)
|
||||||
|
|
||||||
|
//write data to daily bucket if in the last 15 days
|
||||||
|
if postSmartResults.Date.After(dailyBucketMax) {
|
||||||
|
sr.logger.Debugf("device (%s) smart data added to bucket: daily", preDevice.WWN)
|
||||||
|
// write point immediately
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), sr.appConfig.GetString("web.influxdb.bucket")),
|
||||||
|
"smart",
|
||||||
|
smartTags,
|
||||||
|
smartFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), sr.appConfig.GetString("web.influxdb.bucket")),
|
||||||
|
"temp",
|
||||||
|
tempTags,
|
||||||
|
tempFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//write data to the weekly bucket if in the last 9 weeks, and week has not been processed yet
|
||||||
|
if _, weekExists := weeklyLookup[yearWeekStr]; !weekExists && postSmartResults.Date.After(weeklyBucketMax) {
|
||||||
|
sr.logger.Debugf("device (%s) smart data added to bucket: weekly", preDevice.WWN)
|
||||||
|
|
||||||
|
//this week/year pair has not been processed
|
||||||
|
weeklyLookup[yearWeekStr] = true
|
||||||
|
// write point immediately
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))),
|
||||||
|
"smart",
|
||||||
|
smartTags,
|
||||||
|
smartFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))),
|
||||||
|
"temp",
|
||||||
|
tempTags,
|
||||||
|
tempFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//write data to the monthly bucket if in the last 9 weeks, and week has not been processed yet
|
||||||
|
if _, monthExists := monthlyLookup[yearMonthStr]; !monthExists && postSmartResults.Date.After(monthlyBucketMax) {
|
||||||
|
sr.logger.Debugf("device (%s) smart data added to bucket: monthly", preDevice.WWN)
|
||||||
|
|
||||||
|
//this month/year pair has not been processed
|
||||||
|
monthlyLookup[yearMonthStr] = true
|
||||||
|
// write point immediately
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))),
|
||||||
|
"smart",
|
||||||
|
smartTags,
|
||||||
|
smartFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))),
|
||||||
|
"temp",
|
||||||
|
tempTags,
|
||||||
|
tempFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, yearExists := yearlyLookup[yearStr]; !yearExists && year != today.Year() {
|
||||||
|
sr.logger.Debugf("device (%s) smart data added to bucket: yearly", preDevice.WWN)
|
||||||
|
|
||||||
|
//this year has not been processed
|
||||||
|
yearlyLookup[yearStr] = true
|
||||||
|
// write point immediately
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))),
|
||||||
|
"smart",
|
||||||
|
smartTags,
|
||||||
|
smartFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sr.saveDatapoint(
|
||||||
|
sr.influxClient.WriteAPIBlocking(sr.appConfig.GetString("web.influxdb.org"), fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))),
|
||||||
|
"temp",
|
||||||
|
tempTags,
|
||||||
|
tempFields,
|
||||||
|
postSmartResults.Date, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sr.logger.Infof("finished processing device %s. weekly: %d, monthly: %d, yearly: %d", preDevice.WWN, len(weeklyLookup), len(monthlyLookup), len(yearlyLookup))
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "20220503120000", // cleanup - v0.4.0 - influxdb schema
|
||||||
|
Migrate: func(tx *gorm.DB) error {
|
||||||
|
// delete unnecessary tables.
|
||||||
|
err := tx.Migrator().DropTable(
|
||||||
|
&m20201107210306.Smart{},
|
||||||
|
&m20201107210306.SmartAtaAttribute{},
|
||||||
|
&m20201107210306.SmartNvmeAttribute{},
|
||||||
|
&m20201107210306.SmartScsiAttribute{},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
//migrate the device database to the final version
|
||||||
|
return tx.AutoMigrate(models.Device{})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
if err := m.Migrate(); err != nil {
|
||||||
|
sr.logger.Errorf("Database migration failed with error: %w", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sr.logger.Infoln("Database migration completed successfully")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated
|
||||||
|
func m20201107210306_FromPreInfluxDBTempCreatePostInfluxDBTemp(preDevice m20201107210306.Device, preSmartResult m20201107210306.Smart) (error, measurements.SmartTemperature) {
|
||||||
|
//extract temperature data for every datapoint
|
||||||
|
postSmartTemp := measurements.SmartTemperature{
|
||||||
|
Date: preSmartResult.TestDate,
|
||||||
|
Temp: preSmartResult.Temp,
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, postSmartTemp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated
|
||||||
|
func m20201107210306_FromPreInfluxDBSmartResultsCreatePostInfluxDBSmartResults(database *gorm.DB, preDevice m20201107210306.Device, preSmartResult m20201107210306.Smart) (error, measurements.Smart) {
|
||||||
|
//create a measurements.Smart object (which we will then push to the InfluxDB)
|
||||||
|
postDeviceSmartData := measurements.Smart{
|
||||||
|
Date: preSmartResult.TestDate,
|
||||||
|
DeviceWWN: preDevice.WWN,
|
||||||
|
DeviceProtocol: preDevice.DeviceProtocol,
|
||||||
|
Temp: preSmartResult.Temp,
|
||||||
|
PowerOnHours: preSmartResult.PowerOnHours,
|
||||||
|
PowerCycleCount: preSmartResult.PowerCycleCount,
|
||||||
|
|
||||||
|
// this needs to be populated using measurements.Smart.ProcessAtaSmartInfo, ProcessScsiSmartInfo or ProcessNvmeSmartInfo
|
||||||
|
// because those functions will take into account thresholds (which we didn't consider correctly previously)
|
||||||
|
Attributes: map[string]measurements.SmartAttribute{},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := database.Preload("AtaAttributes").Preload("NvmeAttributes").Preload("ScsiAttributes").Find(&preSmartResult)
|
||||||
|
if result.Error != nil {
|
||||||
|
return result.Error, postDeviceSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
if preDevice.IsAta() {
|
||||||
|
preAtaSmartAttributesTable := []collector.AtaSmartAttributesTableItem{}
|
||||||
|
for _, preAtaAttribute := range preSmartResult.AtaAttributes {
|
||||||
|
preAtaSmartAttributesTable = append(preAtaSmartAttributesTable, collector.AtaSmartAttributesTableItem{
|
||||||
|
ID: preAtaAttribute.AttributeId,
|
||||||
|
Name: preAtaAttribute.Name,
|
||||||
|
Value: int64(preAtaAttribute.Value),
|
||||||
|
Worst: int64(preAtaAttribute.Worst),
|
||||||
|
Thresh: int64(preAtaAttribute.Threshold),
|
||||||
|
WhenFailed: preAtaAttribute.WhenFailed,
|
||||||
|
Flags: struct {
|
||||||
|
Value int `json:"value"`
|
||||||
|
String string `json:"string"`
|
||||||
|
Prefailure bool `json:"prefailure"`
|
||||||
|
UpdatedOnline bool `json:"updated_online"`
|
||||||
|
Performance bool `json:"performance"`
|
||||||
|
ErrorRate bool `json:"error_rate"`
|
||||||
|
EventCount bool `json:"event_count"`
|
||||||
|
AutoKeep bool `json:"auto_keep"`
|
||||||
|
}{
|
||||||
|
Value: 0,
|
||||||
|
String: "",
|
||||||
|
Prefailure: false,
|
||||||
|
UpdatedOnline: false,
|
||||||
|
Performance: false,
|
||||||
|
ErrorRate: false,
|
||||||
|
EventCount: false,
|
||||||
|
AutoKeep: false,
|
||||||
|
},
|
||||||
|
Raw: struct {
|
||||||
|
Value int64 `json:"value"`
|
||||||
|
String string `json:"string"`
|
||||||
|
}{
|
||||||
|
Value: preAtaAttribute.RawValue,
|
||||||
|
String: preAtaAttribute.RawString,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
postDeviceSmartData.ProcessAtaSmartInfo(preAtaSmartAttributesTable)
|
||||||
|
|
||||||
|
} else if preDevice.IsNvme() {
|
||||||
|
//info collector.SmartInfo
|
||||||
|
postNvmeSmartHealthInformation := collector.NvmeSmartHealthInformationLog{}
|
||||||
|
|
||||||
|
for _, preNvmeAttribute := range preSmartResult.NvmeAttributes {
|
||||||
|
switch preNvmeAttribute.AttributeId {
|
||||||
|
case "critical_warning":
|
||||||
|
postNvmeSmartHealthInformation.CriticalWarning = int64(preNvmeAttribute.Value)
|
||||||
|
case "temperature":
|
||||||
|
postNvmeSmartHealthInformation.Temperature = int64(preNvmeAttribute.Value)
|
||||||
|
case "available_spare":
|
||||||
|
postNvmeSmartHealthInformation.AvailableSpare = int64(preNvmeAttribute.Value)
|
||||||
|
case "available_spare_threshold":
|
||||||
|
postNvmeSmartHealthInformation.AvailableSpareThreshold = int64(preNvmeAttribute.Value)
|
||||||
|
case "percentage_used":
|
||||||
|
postNvmeSmartHealthInformation.PercentageUsed = int64(preNvmeAttribute.Value)
|
||||||
|
case "data_units_read":
|
||||||
|
postNvmeSmartHealthInformation.DataUnitsWritten = int64(preNvmeAttribute.Value)
|
||||||
|
case "data_units_written":
|
||||||
|
postNvmeSmartHealthInformation.DataUnitsWritten = int64(preNvmeAttribute.Value)
|
||||||
|
case "host_reads":
|
||||||
|
postNvmeSmartHealthInformation.HostReads = int64(preNvmeAttribute.Value)
|
||||||
|
case "host_writes":
|
||||||
|
postNvmeSmartHealthInformation.HostWrites = int64(preNvmeAttribute.Value)
|
||||||
|
case "controller_busy_time":
|
||||||
|
postNvmeSmartHealthInformation.ControllerBusyTime = int64(preNvmeAttribute.Value)
|
||||||
|
case "power_cycles":
|
||||||
|
postNvmeSmartHealthInformation.PowerCycles = int64(preNvmeAttribute.Value)
|
||||||
|
case "power_on_hours":
|
||||||
|
postNvmeSmartHealthInformation.PowerOnHours = int64(preNvmeAttribute.Value)
|
||||||
|
case "unsafe_shutdowns":
|
||||||
|
postNvmeSmartHealthInformation.UnsafeShutdowns = int64(preNvmeAttribute.Value)
|
||||||
|
case "media_errors":
|
||||||
|
postNvmeSmartHealthInformation.MediaErrors = int64(preNvmeAttribute.Value)
|
||||||
|
case "num_err_log_entries":
|
||||||
|
postNvmeSmartHealthInformation.NumErrLogEntries = int64(preNvmeAttribute.Value)
|
||||||
|
case "warning_temp_time":
|
||||||
|
postNvmeSmartHealthInformation.WarningTempTime = int64(preNvmeAttribute.Value)
|
||||||
|
case "critical_comp_time":
|
||||||
|
postNvmeSmartHealthInformation.CriticalCompTime = int64(preNvmeAttribute.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
postDeviceSmartData.ProcessNvmeSmartInfo(postNvmeSmartHealthInformation)
|
||||||
|
|
||||||
|
} else if preDevice.IsScsi() {
|
||||||
|
//info collector.SmartInfo
|
||||||
|
var postScsiGrownDefectList int64
|
||||||
|
postScsiErrorCounterLog := collector.ScsiErrorCounterLog{
|
||||||
|
Read: struct {
|
||||||
|
ErrorsCorrectedByEccfast int64 `json:"errors_corrected_by_eccfast"`
|
||||||
|
ErrorsCorrectedByEccdelayed int64 `json:"errors_corrected_by_eccdelayed"`
|
||||||
|
ErrorsCorrectedByRereadsRewrites int64 `json:"errors_corrected_by_rereads_rewrites"`
|
||||||
|
TotalErrorsCorrected int64 `json:"total_errors_corrected"`
|
||||||
|
CorrectionAlgorithmInvocations int64 `json:"correction_algorithm_invocations"`
|
||||||
|
GigabytesProcessed string `json:"gigabytes_processed"`
|
||||||
|
TotalUncorrectedErrors int64 `json:"total_uncorrected_errors"`
|
||||||
|
}{},
|
||||||
|
Write: struct {
|
||||||
|
ErrorsCorrectedByEccfast int64 `json:"errors_corrected_by_eccfast"`
|
||||||
|
ErrorsCorrectedByEccdelayed int64 `json:"errors_corrected_by_eccdelayed"`
|
||||||
|
ErrorsCorrectedByRereadsRewrites int64 `json:"errors_corrected_by_rereads_rewrites"`
|
||||||
|
TotalErrorsCorrected int64 `json:"total_errors_corrected"`
|
||||||
|
CorrectionAlgorithmInvocations int64 `json:"correction_algorithm_invocations"`
|
||||||
|
GigabytesProcessed string `json:"gigabytes_processed"`
|
||||||
|
TotalUncorrectedErrors int64 `json:"total_uncorrected_errors"`
|
||||||
|
}{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, preScsiAttribute := range preSmartResult.ScsiAttributes {
|
||||||
|
switch preScsiAttribute.AttributeId {
|
||||||
|
case "scsi_grown_defect_list":
|
||||||
|
postScsiGrownDefectList = int64(preScsiAttribute.Value)
|
||||||
|
case "read.errors_corrected_by_eccfast":
|
||||||
|
postScsiErrorCounterLog.Read.ErrorsCorrectedByEccfast = int64(preScsiAttribute.Value)
|
||||||
|
case "read.errors_corrected_by_eccdelayed":
|
||||||
|
postScsiErrorCounterLog.Read.ErrorsCorrectedByEccdelayed = int64(preScsiAttribute.Value)
|
||||||
|
case "read.errors_corrected_by_rereads_rewrites":
|
||||||
|
postScsiErrorCounterLog.Read.ErrorsCorrectedByRereadsRewrites = int64(preScsiAttribute.Value)
|
||||||
|
case "read.total_errors_corrected":
|
||||||
|
postScsiErrorCounterLog.Read.TotalErrorsCorrected = int64(preScsiAttribute.Value)
|
||||||
|
case "read.correction_algorithm_invocations":
|
||||||
|
postScsiErrorCounterLog.Read.CorrectionAlgorithmInvocations = int64(preScsiAttribute.Value)
|
||||||
|
case "read.total_uncorrected_errors":
|
||||||
|
postScsiErrorCounterLog.Read.TotalUncorrectedErrors = int64(preScsiAttribute.Value)
|
||||||
|
case "write.errors_corrected_by_eccfast":
|
||||||
|
postScsiErrorCounterLog.Write.ErrorsCorrectedByEccfast = int64(preScsiAttribute.Value)
|
||||||
|
case "write.errors_corrected_by_eccdelayed":
|
||||||
|
postScsiErrorCounterLog.Write.ErrorsCorrectedByEccdelayed = int64(preScsiAttribute.Value)
|
||||||
|
case "write.errors_corrected_by_rereads_rewrites":
|
||||||
|
postScsiErrorCounterLog.Write.ErrorsCorrectedByRereadsRewrites = int64(preScsiAttribute.Value)
|
||||||
|
case "write.total_errors_corrected":
|
||||||
|
postScsiErrorCounterLog.Write.TotalErrorsCorrected = int64(preScsiAttribute.Value)
|
||||||
|
case "write.correction_algorithm_invocations":
|
||||||
|
postScsiErrorCounterLog.Write.CorrectionAlgorithmInvocations = int64(preScsiAttribute.Value)
|
||||||
|
case "write.total_uncorrected_errors":
|
||||||
|
postScsiErrorCounterLog.Write.TotalUncorrectedErrors = int64(preScsiAttribute.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
postDeviceSmartData.ProcessScsiSmartInfo(postScsiGrownDefectList, postScsiErrorCounterLog)
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Unknown device protocol: %s", preDevice.DeviceProtocol), postDeviceSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, postDeviceSmartData
|
||||||
|
}
|
@ -0,0 +1,122 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Tasks
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) error {
|
||||||
|
weeklyTaskName := "tsk-weekly-aggr"
|
||||||
|
if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: weeklyTaskName}); findErr == nil && len(found) == 0 {
|
||||||
|
//weekly on Sunday at 1:00am
|
||||||
|
_, err := sr.influxTaskApi.CreateTaskWithCron(ctx, weeklyTaskName, sr.DownsampleScript("weekly"), "0 1 * * 0", orgID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
monthlyTaskName := "tsk-monthly-aggr"
|
||||||
|
if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: monthlyTaskName}); findErr == nil && len(found) == 0 {
|
||||||
|
//monthly on first day of the month at 1:30am
|
||||||
|
_, err := sr.influxTaskApi.CreateTaskWithCron(ctx, monthlyTaskName, sr.DownsampleScript("monthly"), "30 1 1 * *", orgID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
yearlyTaskName := "tsk-yearly-aggr"
|
||||||
|
if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: yearlyTaskName}); findErr == nil && len(found) == 0 {
|
||||||
|
//yearly on the first day of the year at 2:00am
|
||||||
|
_, err := sr.influxTaskApi.CreateTaskWithCron(ctx, yearlyTaskName, sr.DownsampleScript("yearly"), "0 2 1 1 *", orgID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownsampleScript returns the Flux script used by the periodic aggregation
// tasks (see EnsureTasks). The script reads the "smart" and "temp"
// measurements from the bucket one granularity below aggregationType,
// aggregates them over the matching window, and writes the results to the
// aggregationType bucket.
//
// NOTE(review): aggregationType must be "weekly", "monthly" or "yearly"; any
// other value leaves every variable empty and yields a broken Flux script.
// Consider returning an error for unknown values -- TODO confirm with callers.
func (sr *scrutinyRepository) DownsampleScript(aggregationType string) string {
	var sourceBucket string // the source of the data
	var destBucket string   // the destination for the aggregated data
	var rangeStart string
	var rangeEnd string
	var aggWindow string
	switch aggregationType {
	case "weekly":
		// base (daily) bucket -> weekly bucket, 1 week windows
		sourceBucket = sr.appConfig.GetString("web.influxdb.bucket")
		destBucket = fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))
		rangeStart = "-2w"
		rangeEnd = "-1w"
		aggWindow = "1w"
	case "monthly":
		// weekly bucket -> monthly bucket, 1 month windows
		sourceBucket = fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))
		destBucket = fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))
		rangeStart = "-2mo"
		rangeEnd = "-1mo"
		aggWindow = "1mo"
	case "yearly":
		// monthly bucket -> yearly bucket, 1 year windows
		sourceBucket = fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))
		destBucket = fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))
		rangeStart = "-2y"
		rangeEnd = "-1y"
		aggWindow = "1y"
	}

	// TODO: using "last" function for aggregation. This should eventually be replaced with a more accurate represenation
	/*
		import "types"
		smart_data = from(bucket: sourceBucket)
		|> range(start: rangeStart, stop: rangeEnd)
		|> filter(fn: (r) => r["_measurement"] == "smart" )
		|> group(columns: ["device_wwn", "_field"])

		non_numeric_smart_data = smart_data
		|> filter(fn: (r) => types.isType(v: r._value, type: "string") or types.isType(v: r._value, type: "bool"))
		|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)

		numeric_smart_data = smart_data
		|> filter(fn: (r) => types.isType(v: r._value, type: "int") or types.isType(v: r._value, type: "float"))
		|> aggregateWindow(every: aggWindow, fn: mean, createEmpty: false)

		union(tables: [non_numeric_smart_data, numeric_smart_data])
		|> to(bucket: destBucket, org: destOrg)

	*/

	// the %s placeholders below are filled, in order, with: sourceBucket,
	// rangeStart, rangeEnd, aggWindow, destBucket, and the org name.
	return fmt.Sprintf(`
sourceBucket = "%s"
rangeStart = %s
rangeEnd = %s
aggWindow = %s
destBucket = "%s"
destOrg = "%s"

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

temp_data = from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()

temp_data
|> aggregateWindow(fn: mean, every: aggWindow)
|> to(bucket: destBucket, org: destOrg)
`,
		sourceBucket,
		rangeStart,
		rangeEnd,
		aggWindow,
		destBucket,
		sr.appConfig.GetString("web.influxdb.org"),
	)
}
|
@ -0,0 +1,167 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Temperature Data
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// SaveSmartTemperature writes temperature datapoints for one device to
// InfluxDB. If the collector payload includes an ATA SCT temperature history
// table, every table entry is written as its own point; otherwise only the
// current temperature reading is stored.
func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn string, deviceProtocol string, collectorSmartData collector.SmartInfo) error {
	if len(collectorSmartData.AtaSctTemperatureHistory.Table) > 0 {

		for ndx, temp := range collectorSmartData.AtaSctTemperatureHistory.Table {

			// despite its name this offset is in seconds (minutes * 60); it is
			// subtracted from the device's local time, so higher indexes get
			// earlier timestamps.
			// NOTE(review): smartctl's SCT history table is typically ordered
			// oldest-first, which would make index 0 the oldest sample -- TODO
			// confirm the intended direction of this offset.
			minutesOffset := collectorSmartData.AtaSctTemperatureHistory.LoggingIntervalMinutes * int64(ndx) * 60
			smartTemp := measurements.SmartTemperature{
				Date: time.Unix(collectorSmartData.LocalTime.TimeT-minutesOffset, 0),
				Temp: temp,
			}

			tags, fields := smartTemp.Flatten()
			tags["device_wwn"] = wwn
			p := influxdb2.NewPoint("temp",
				tags,
				fields,
				smartTemp.Date)
			err := sr.influxWriteApi.WritePoint(ctx, p)
			if err != nil {
				return err
			}
		}
		// also add the current temperature.
		// NOTE(review): despite the comment above, the current temperature is
		// NOT written in this branch -- only the history table entries are.
		// Confirm whether that is intentional.
	} else {

		// no history table available: store only the current reading.
		smartTemp := measurements.SmartTemperature{
			Date: time.Unix(collectorSmartData.LocalTime.TimeT, 0),
			Temp: collectorSmartData.Temperature.Current,
		}

		tags, fields := smartTemp.Flatten()
		tags["device_wwn"] = wwn
		p := influxdb2.NewPoint("temp",
			tags,
			fields,
			smartTemp.Date)
		return sr.influxWriteApi.WritePoint(ctx, p)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error) {
|
||||||
|
//we can get temp history for "week", "month", DURATION_KEY_YEAR, "forever"
|
||||||
|
|
||||||
|
deviceTempHistory := map[string][]measurements.SmartTemperature{}
|
||||||
|
|
||||||
|
//TODO: change the query range to a variable.
|
||||||
|
queryStr := sr.aggregateTempQuery(durationKey)
|
||||||
|
|
||||||
|
result, err := sr.influxQueryApi.Query(ctx, queryStr)
|
||||||
|
if err == nil {
|
||||||
|
// Use Next() to iterate over query result lines
|
||||||
|
for result.Next() {
|
||||||
|
|
||||||
|
if deviceWWN, ok := result.Record().Values()["device_wwn"]; ok {
|
||||||
|
|
||||||
|
//check if deviceWWN has been seen and initialized already
|
||||||
|
if _, ok := deviceTempHistory[deviceWWN.(string)]; !ok {
|
||||||
|
deviceTempHistory[deviceWWN.(string)] = []measurements.SmartTemperature{}
|
||||||
|
}
|
||||||
|
|
||||||
|
currentTempHistory := deviceTempHistory[deviceWWN.(string)]
|
||||||
|
smartTemp := measurements.SmartTemperature{}
|
||||||
|
|
||||||
|
for key, val := range result.Record().Values() {
|
||||||
|
smartTemp.Inflate(key, val)
|
||||||
|
}
|
||||||
|
smartTemp.Date = result.Record().Values()["_time"].(time.Time)
|
||||||
|
currentTempHistory = append(currentTempHistory, smartTemp)
|
||||||
|
deviceTempHistory[deviceWWN.(string)] = currentTempHistory
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if result.Err() != nil {
|
||||||
|
fmt.Printf("Query error: %s\n", result.Err().Error())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return deviceTempHistory, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Helper Methods
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
|
||||||
|
|
||||||
|
/*
|
||||||
|
import "influxdata/influxdb/schema"
|
||||||
|
weekData = from(bucket: "metrics")
|
||||||
|
|> range(start: -1w, stop: now())
|
||||||
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|> toInt()
|
||||||
|
|
||||||
|
monthData = from(bucket: "metrics_weekly")
|
||||||
|
|> range(start: -1mo, stop: now())
|
||||||
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|> toInt()
|
||||||
|
|
||||||
|
union(tables: [weekData, monthData])
|
||||||
|
|> group(columns: ["device_wwn"])
|
||||||
|
|> sort(columns: ["_time"], desc: false)
|
||||||
|
|> schema.fieldsAsCols()
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
partialQueryStr := []string{
|
||||||
|
`import "influxdata/influxdb/schema"`,
|
||||||
|
}
|
||||||
|
|
||||||
|
nestedDurationKeys := sr.lookupNestedDurationKeys(durationKey)
|
||||||
|
|
||||||
|
subQueryNames := []string{}
|
||||||
|
for _, nestedDurationKey := range nestedDurationKeys {
|
||||||
|
bucketName := sr.lookupBucketName(nestedDurationKey)
|
||||||
|
durationRange := sr.lookupDuration(nestedDurationKey)
|
||||||
|
|
||||||
|
subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
|
||||||
|
partialQueryStr = append(partialQueryStr, []string{
|
||||||
|
fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
|
||||||
|
fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
|
||||||
|
`|> filter(fn: (r) => r["_measurement"] == "temp" )`,
|
||||||
|
`|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)`,
|
||||||
|
`|> group(columns: ["device_wwn"])`,
|
||||||
|
`|> toInt()`,
|
||||||
|
"",
|
||||||
|
}...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(subQueryNames) == 1 {
|
||||||
|
//there's only one bucket being queried, no need to union, just aggregate the dataset and return
|
||||||
|
partialQueryStr = append(partialQueryStr, []string{
|
||||||
|
subQueryNames[0],
|
||||||
|
"|> schema.fieldsAsCols()",
|
||||||
|
"|> yield()",
|
||||||
|
}...)
|
||||||
|
} else {
|
||||||
|
partialQueryStr = append(partialQueryStr, []string{
|
||||||
|
fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
|
||||||
|
`|> group(columns: ["device_wwn"])`,
|
||||||
|
`|> sort(columns: ["_time"], desc: false)`,
|
||||||
|
"|> schema.fieldsAsCols()",
|
||||||
|
}...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(partialQueryStr, "\n")
|
||||||
|
}
|
@ -1,160 +0,0 @@
|
|||||||
package db
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeviceWrapper is the standard API response envelope for device data: a
// success flag, any accumulated errors, and the device payload itself.
type DeviceWrapper struct {
	Success bool     `json:"success"`
	Errors  []error  `json:"errors"`
	Data    []Device `json:"data"`
}
|
|
||||||
|
|
||||||
// Device protocol values. The protocol determines which SMART attribute
// table (ATA, SCSI or NVMe) applies to a device -- see Device.IsAta/IsScsi/IsNvme.
const DeviceProtocolAta = "ATA"
const DeviceProtocolScsi = "SCSI"
const DeviceProtocolNvme = "NVMe"
|
|
||||||
|
|
||||||
// Device is the GORM model for a single physical drive, keyed by its WWN,
// together with the SMART snapshots collected for it.
type Device struct {
	//GORM attributes, see: http://gorm.io/docs/conventions.html
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time

	// WWN is the world-wide name used as the primary key for a drive.
	WWN string `json:"wwn" gorm:"primary_key"`
	// HostId identifies the host the drive is attached to.
	HostId string `json:"host_id"`

	DeviceName     string `json:"device_name"`
	Manufacturer   string `json:"manufacturer"`
	ModelName      string `json:"model_name"`
	InterfaceType  string `json:"interface_type"`
	InterfaceSpeed string `json:"interface_speed"`
	SerialNumber   string `json:"serial_number"`
	Firmware       string `json:"firmware"`
	RotationSpeed  int    `json:"rotational_speed"`
	Capacity       int64  `json:"capacity"`
	FormFactor     string `json:"form_factor"`
	SmartSupport   bool   `json:"smart_support"`
	DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
	DeviceType     string `json:"device_type"`     //device type is used for querying with -d/t flag, should only be used by collector.
	// SmartResults holds the collected SMART snapshots, newest first.
	SmartResults []Smart `gorm:"foreignkey:DeviceWWN" json:"smart_results"`
}
|
|
||||||
|
|
||||||
// IsAta reports whether the device uses the ATA protocol.
func (dv *Device) IsAta() bool {
	return dv.DeviceProtocol == DeviceProtocolAta
}
|
|
||||||
|
|
||||||
// IsScsi reports whether the device uses the SCSI protocol.
func (dv *Device) IsScsi() bool {
	return dv.DeviceProtocol == DeviceProtocolScsi
}
|
|
||||||
|
|
||||||
// IsNvme reports whether the device uses the NVMe protocol.
func (dv *Device) IsNvme() bool {
	return dv.DeviceProtocol == DeviceProtocolNvme
}
|
|
||||||
|
|
||||||
//SquashHistory collapses a device's SmartResults down to the latest entry.
//This method requires a device with an array of SmartResults.
//It will remove all SmartResults other than the first (the latest one)
//All removed SmartResults, will be processed, grouping SmartAtaAttribute by attribute_id
// and adding them to an array called History.
func (dv *Device) SquashHistory() error {
	if len(dv.SmartResults) <= 1 {
		return nil //no ataHistory found. ignore
	}

	// split the slice: [0] is the latest result, the rest is history
	latestSmartResultSlice := dv.SmartResults[0:1]
	historicalSmartResultSlice := dv.SmartResults[1:]

	//re-assign the latest slice to the SmartResults field
	dv.SmartResults = latestSmartResultSlice

	//process the historical slice for ATA data
	if len(dv.SmartResults[0].AtaAttributes) > 0 {
		// bucket every historical ATA attribute by its numeric attribute id
		ataHistory := map[int][]SmartAtaAttribute{}
		for _, smartResult := range historicalSmartResultSlice {
			for _, smartAttribute := range smartResult.AtaAttributes {
				if _, ok := ataHistory[smartAttribute.AttributeId]; !ok {
					ataHistory[smartAttribute.AttributeId] = []SmartAtaAttribute{}
				}
				ataHistory[smartAttribute.AttributeId] = append(ataHistory[smartAttribute.AttributeId], smartAttribute)
			}
		}

		//now assign the historical slices to the AtaAttributes in the latest SmartResults
		for sandx, smartAttribute := range dv.SmartResults[0].AtaAttributes {
			if attributeHistory, ok := ataHistory[smartAttribute.AttributeId]; ok {
				dv.SmartResults[0].AtaAttributes[sandx].History = attributeHistory
			}
		}
	}

	//process the historical slice for Nvme data
	if len(dv.SmartResults[0].NvmeAttributes) > 0 {
		// bucket every historical NVMe attribute by its string attribute id
		nvmeHistory := map[string][]SmartNvmeAttribute{}
		for _, smartResult := range historicalSmartResultSlice {
			for _, smartAttribute := range smartResult.NvmeAttributes {
				if _, ok := nvmeHistory[smartAttribute.AttributeId]; !ok {
					nvmeHistory[smartAttribute.AttributeId] = []SmartNvmeAttribute{}
				}
				nvmeHistory[smartAttribute.AttributeId] = append(nvmeHistory[smartAttribute.AttributeId], smartAttribute)
			}
		}

		//now assign the historical slices to the NvmeAttributes in the latest SmartResults
		for sandx, smartAttribute := range dv.SmartResults[0].NvmeAttributes {
			if attributeHistory, ok := nvmeHistory[smartAttribute.AttributeId]; ok {
				dv.SmartResults[0].NvmeAttributes[sandx].History = attributeHistory
			}
		}
	}
	//process the historical slice for Scsi data
	if len(dv.SmartResults[0].ScsiAttributes) > 0 {
		// bucket every historical SCSI attribute by its string attribute id
		scsiHistory := map[string][]SmartScsiAttribute{}
		for _, smartResult := range historicalSmartResultSlice {
			for _, smartAttribute := range smartResult.ScsiAttributes {
				if _, ok := scsiHistory[smartAttribute.AttributeId]; !ok {
					scsiHistory[smartAttribute.AttributeId] = []SmartScsiAttribute{}
				}
				scsiHistory[smartAttribute.AttributeId] = append(scsiHistory[smartAttribute.AttributeId], smartAttribute)
			}
		}

		//now assign the historical slices to the ScsiAttributes in the latest SmartResults
		for sandx, smartAttribute := range dv.SmartResults[0].ScsiAttributes {
			if attributeHistory, ok := scsiHistory[smartAttribute.AttributeId]; ok {
				dv.SmartResults[0].ScsiAttributes[sandx].History = attributeHistory
			}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (dv *Device) ApplyMetadataRules() error {
|
|
||||||
|
|
||||||
//embed metadata in the latest smart attributes object
|
|
||||||
if len(dv.SmartResults) > 0 {
|
|
||||||
for ndx, attr := range dv.SmartResults[0].AtaAttributes {
|
|
||||||
attr.PopulateAttributeStatus()
|
|
||||||
dv.SmartResults[0].AtaAttributes[ndx] = attr
|
|
||||||
}
|
|
||||||
|
|
||||||
for ndx, attr := range dv.SmartResults[0].NvmeAttributes {
|
|
||||||
attr.PopulateAttributeStatus()
|
|
||||||
dv.SmartResults[0].NvmeAttributes[ndx] = attr
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
for ndx, attr := range dv.SmartResults[0].ScsiAttributes {
|
|
||||||
attr.PopulateAttributeStatus()
|
|
||||||
dv.SmartResults[0].ScsiAttributes[ndx] = attr
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function is called every time the collector sends SMART data to the API.
|
|
||||||
// It can be used to update device data that can change over time.
|
|
||||||
func (dv *Device) UpdateFromCollectorSmartInfo(info collector.SmartInfo) error {
|
|
||||||
dv.Firmware = info.FirmwareVersion
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,15 +0,0 @@
|
|||||||
package db
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// SelfTest is a database record of a single SMART self-test run for a device.
type SelfTest struct {
	//GORM attributes, see: http://gorm.io/docs/conventions.html
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time

	// DeviceWWN links this record to its owning Device.
	DeviceWWN string
	Device    Device `json:"-" gorm:"foreignkey:DeviceWWN"` // use DeviceWWN as foreign key

	// Date the self-test was performed.
	Date time.Time
}
|
|
@ -1,127 +0,0 @@
|
|||||||
package db
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/metadata"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// smartctl "when_failed" values for ATA attributes (compared after ToUpper).
const SmartWhenFailedFailingNow = "FAILING_NOW"
const SmartWhenFailedInThePast = "IN_THE_PAST"

// Overall SMART health status values stored on the Smart model.
const SmartStatusPassed = "passed"
const SmartStatusFailed = "failed"
|
|
||||||
|
|
||||||
// Smart is one SMART snapshot for a device, together with its per-protocol
// attribute rows (exactly one of the three attribute slices is populated,
// depending on the device protocol).
type Smart struct {
	gorm.Model

	// DeviceWWN links this snapshot to its owning Device.
	DeviceWWN string `json:"device_wwn"`
	Device    Device `json:"-" gorm:"foreignkey:DeviceWWN"` // use DeviceWWN as foreign key

	TestDate    time.Time `json:"date"`
	SmartStatus string    `json:"smart_status"` // SmartStatusPassed or SmartStatusFailed

	//Metrics
	Temp            int64 `json:"temp"`
	PowerOnHours    int64 `json:"power_on_hours"`
	PowerCycleCount int64 `json:"power_cycle_count"`

	// Per-protocol attribute rows; populated by ProcessAtaSmartInfo /
	// ProcessNvmeSmartInfo / ProcessScsiSmartInfo respectively.
	AtaAttributes  []SmartAtaAttribute  `json:"ata_attributes" gorm:"foreignkey:SmartId"`
	NvmeAttributes []SmartNvmeAttribute `json:"nvme_attributes" gorm:"foreignkey:SmartId"`
	ScsiAttributes []SmartScsiAttribute `json:"scsi_attributes" gorm:"foreignkey:SmartId"`
}
|
|
||||||
|
|
||||||
//Parse Collector SMART data results and create Smart object (and associated SmartAtaAttribute entries)
|
|
||||||
func (sm *Smart) FromCollectorSmartInfo(wwn string, info collector.SmartInfo) error {
|
|
||||||
sm.DeviceWWN = wwn
|
|
||||||
sm.TestDate = time.Unix(info.LocalTime.TimeT, 0)
|
|
||||||
|
|
||||||
//smart metrics
|
|
||||||
sm.Temp = info.Temperature.Current
|
|
||||||
sm.PowerCycleCount = info.PowerCycleCount
|
|
||||||
sm.PowerOnHours = info.PowerOnTime.Hours
|
|
||||||
|
|
||||||
// process ATA/NVME/SCSI protocol data
|
|
||||||
if info.Device.Protocol == DeviceProtocolAta {
|
|
||||||
sm.ProcessAtaSmartInfo(info)
|
|
||||||
} else if info.Device.Protocol == DeviceProtocolNvme {
|
|
||||||
sm.ProcessNvmeSmartInfo(info)
|
|
||||||
} else if info.Device.Protocol == DeviceProtocolScsi {
|
|
||||||
sm.ProcessScsiSmartInfo(info)
|
|
||||||
}
|
|
||||||
|
|
||||||
if info.SmartStatus.Passed {
|
|
||||||
sm.SmartStatus = SmartStatusPassed
|
|
||||||
} else {
|
|
||||||
sm.SmartStatus = SmartStatusFailed
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
//generate SmartAtaAttribute entries from Scrutiny Collector Smart data.
|
|
||||||
func (sm *Smart) ProcessAtaSmartInfo(info collector.SmartInfo) {
|
|
||||||
sm.AtaAttributes = []SmartAtaAttribute{}
|
|
||||||
for _, collectorAttr := range info.AtaSmartAttributes.Table {
|
|
||||||
attrModel := SmartAtaAttribute{
|
|
||||||
AttributeId: collectorAttr.ID,
|
|
||||||
Name: collectorAttr.Name,
|
|
||||||
Value: collectorAttr.Value,
|
|
||||||
Worst: collectorAttr.Worst,
|
|
||||||
Threshold: collectorAttr.Thresh,
|
|
||||||
RawValue: collectorAttr.Raw.Value,
|
|
||||||
RawString: collectorAttr.Raw.String,
|
|
||||||
WhenFailed: collectorAttr.WhenFailed,
|
|
||||||
}
|
|
||||||
|
|
||||||
//now that we've parsed the data from the smartctl response, lets match it against our metadata rules and add additional Scrutiny specific data.
|
|
||||||
if smartMetadata, ok := metadata.AtaMetadata[collectorAttr.ID]; ok {
|
|
||||||
attrModel.Name = smartMetadata.DisplayName
|
|
||||||
if smartMetadata.Transform != nil {
|
|
||||||
attrModel.TransformedValue = smartMetadata.Transform(attrModel.Value, attrModel.RawValue, attrModel.RawString)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sm.AtaAttributes = append(sm.AtaAttributes, attrModel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//generate SmartNvmeAttribute entries from Scrutiny Collector Smart data.
// The NVMe health log has a fixed set of fields, so the attribute list is a
// fixed-size literal. Threshold == -1 is the sentinel for "no threshold"
// (see SmartNvmeAttribute.PopulateAttributeStatus).
func (sm *Smart) ProcessNvmeSmartInfo(info collector.SmartInfo) {
	sm.NvmeAttributes = []SmartNvmeAttribute{
		{AttributeId: "critical_warning", Name: "Critical Warning", Value: info.NvmeSmartHealthInformationLog.CriticalWarning, Threshold: 0},
		{AttributeId: "temperature", Name: "Temperature", Value: info.NvmeSmartHealthInformationLog.Temperature, Threshold: -1},
		{AttributeId: "available_spare", Name: "Available Spare", Value: info.NvmeSmartHealthInformationLog.AvailableSpare, Threshold: info.NvmeSmartHealthInformationLog.AvailableSpareThreshold},
		{AttributeId: "percentage_used", Name: "Percentage Used", Value: info.NvmeSmartHealthInformationLog.PercentageUsed, Threshold: 100},
		{AttributeId: "data_units_read", Name: "Data Units Read", Value: info.NvmeSmartHealthInformationLog.DataUnitsRead, Threshold: -1},
		{AttributeId: "data_units_written", Name: "Data Units Written", Value: info.NvmeSmartHealthInformationLog.DataUnitsWritten, Threshold: -1},
		{AttributeId: "host_reads", Name: "Host Reads", Value: info.NvmeSmartHealthInformationLog.HostReads, Threshold: -1},
		{AttributeId: "host_writes", Name: "Host Writes", Value: info.NvmeSmartHealthInformationLog.HostWrites, Threshold: -1},
		{AttributeId: "controller_busy_time", Name: "Controller Busy Time", Value: info.NvmeSmartHealthInformationLog.ControllerBusyTime, Threshold: -1},
		{AttributeId: "power_cycles", Name: "Power Cycles", Value: info.NvmeSmartHealthInformationLog.PowerCycles, Threshold: -1},
		{AttributeId: "power_on_hours", Name: "Power on Hours", Value: info.NvmeSmartHealthInformationLog.PowerOnHours, Threshold: -1},
		{AttributeId: "unsafe_shutdowns", Name: "Unsafe Shutdowns", Value: info.NvmeSmartHealthInformationLog.UnsafeShutdowns, Threshold: -1},
		{AttributeId: "media_errors", Name: "Media Errors", Value: info.NvmeSmartHealthInformationLog.MediaErrors, Threshold: 0},
		{AttributeId: "num_err_log_entries", Name: "Numb Err Log Entries", Value: info.NvmeSmartHealthInformationLog.NumErrLogEntries, Threshold: 0},
		{AttributeId: "warning_temp_time", Name: "Warning Temp Time", Value: info.NvmeSmartHealthInformationLog.WarningTempTime, Threshold: -1},
		{AttributeId: "critical_comp_time", Name: "Critical CompTime", Value: info.NvmeSmartHealthInformationLog.CriticalCompTime, Threshold: -1},
	}
}
|
|
||||||
|
|
||||||
//generate SmartScsiAttribute entries from Scrutiny Collector Smart data.
// The SCSI error-counter log has a fixed layout (read/write counters), so the
// attribute list is a fixed-size literal. Threshold == -1 is the sentinel for
// "no threshold" (see SmartScsiAttribute.PopulateAttributeStatus).
func (sm *Smart) ProcessScsiSmartInfo(info collector.SmartInfo) {
	sm.ScsiAttributes = []SmartScsiAttribute{
		{AttributeId: "scsi_grown_defect_list", Name: "Grown Defect List", Value: info.ScsiGrownDefectList, Threshold: 0},
		{AttributeId: "read.errors_corrected_by_eccfast", Name: "Read Errors Corrected by ECC Fast", Value: info.ScsiErrorCounterLog.Read.ErrorsCorrectedByEccfast, Threshold: -1},
		{AttributeId: "read.errors_corrected_by_eccdelayed", Name: "Read Errors Corrected by ECC Delayed", Value: info.ScsiErrorCounterLog.Read.ErrorsCorrectedByEccdelayed, Threshold: -1},
		{AttributeId: "read.errors_corrected_by_rereads_rewrites", Name: "Read Errors Corrected by ReReads/ReWrites", Value: info.ScsiErrorCounterLog.Read.ErrorsCorrectedByRereadsRewrites, Threshold: 0},
		{AttributeId: "read.total_errors_corrected", Name: "Read Total Errors Corrected", Value: info.ScsiErrorCounterLog.Read.TotalErrorsCorrected, Threshold: -1},
		{AttributeId: "read.correction_algorithm_invocations", Name: "Read Correction Algorithm Invocations", Value: info.ScsiErrorCounterLog.Read.CorrectionAlgorithmInvocations, Threshold: -1},
		{AttributeId: "read.total_uncorrected_errors", Name: "Read Total Uncorrected Errors", Value: info.ScsiErrorCounterLog.Read.TotalUncorrectedErrors, Threshold: 0},
		{AttributeId: "write.errors_corrected_by_eccfast", Name: "Write Errors Corrected by ECC Fast", Value: info.ScsiErrorCounterLog.Write.ErrorsCorrectedByEccfast, Threshold: -1},
		{AttributeId: "write.errors_corrected_by_eccdelayed", Name: "Write Errors Corrected by ECC Delayed", Value: info.ScsiErrorCounterLog.Write.ErrorsCorrectedByEccdelayed, Threshold: -1},
		{AttributeId: "write.errors_corrected_by_rereads_rewrites", Name: "Write Errors Corrected by ReReads/ReWrites", Value: info.ScsiErrorCounterLog.Write.ErrorsCorrectedByRereadsRewrites, Threshold: 0},
		{AttributeId: "write.total_errors_corrected", Name: "Write Total Errors Corrected", Value: info.ScsiErrorCounterLog.Write.TotalErrorsCorrected, Threshold: -1},
		{AttributeId: "write.correction_algorithm_invocations", Name: "Write Correction Algorithm Invocations", Value: info.ScsiErrorCounterLog.Write.CorrectionAlgorithmInvocations, Threshold: -1},
		{AttributeId: "write.total_uncorrected_errors", Name: "Write Total Uncorrected Errors", Value: info.ScsiErrorCounterLog.Write.TotalUncorrectedErrors, Threshold: 0},
	}
}
|
|
@ -1,111 +0,0 @@
|
|||||||
package db
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/metadata"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Per-attribute status values stored in the Status field of the
// Smart*Attribute models.
const SmartAttributeStatusPassed = "passed"
const SmartAttributeStatusFailed = "failed"
const SmartAttributeStatusWarning = "warn"
|
|
||||||
|
|
||||||
// SmartAtaAttribute is one ATA SMART attribute row belonging to a Smart
// snapshot. Fields tagged gorm:"-" are computed at read time and never
// persisted.
type SmartAtaAttribute struct {
	gorm.Model

	SmartId int    `json:"smart_id"`
	// NOTE(review): the field type here is Device although the foreign key is
	// SmartId — looks like it should reference the Smart model; confirm.
	Smart   Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key

	// Values parsed from the smartctl attribute table.
	AttributeId int    `json:"attribute_id"`
	Name        string `json:"name"`
	Value       int    `json:"value"`
	Worst       int    `json:"worst"`
	Threshold   int    `json:"thresh"`
	RawValue    int64  `json:"raw_value"`
	RawString   string `json:"raw_string"`
	WhenFailed  string `json:"when_failed"`

	// Scrutiny-computed fields (not persisted).
	TransformedValue int64               `json:"transformed_value"`
	Status           string              `gorm:"-" json:"status,omitempty"`
	StatusReason     string              `gorm:"-" json:"status_reason,omitempty"`
	FailureRate      float64             `gorm:"-" json:"failure_rate,omitempty"`
	History          []SmartAtaAttribute `gorm:"-" json:"history,omitempty"`
}
|
|
||||||
|
|
||||||
//populate attribute status, using SMART Thresholds & Observed Metadata
|
|
||||||
func (sa *SmartAtaAttribute) PopulateAttributeStatus() {
|
|
||||||
if strings.ToUpper(sa.WhenFailed) == SmartWhenFailedFailingNow {
|
|
||||||
//this attribute has previously failed
|
|
||||||
sa.Status = SmartAttributeStatusFailed
|
|
||||||
sa.StatusReason = "Attribute is failing manufacturer SMART threshold"
|
|
||||||
|
|
||||||
} else if strings.ToUpper(sa.WhenFailed) == SmartWhenFailedInThePast {
|
|
||||||
sa.Status = SmartAttributeStatusWarning
|
|
||||||
sa.StatusReason = "Attribute has previously failed manufacturer SMART threshold"
|
|
||||||
}
|
|
||||||
|
|
||||||
if smartMetadata, ok := metadata.AtaMetadata[sa.AttributeId]; ok {
|
|
||||||
sa.MetadataObservedThresholdStatus(smartMetadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
//check if status is blank, set to "passed"
|
|
||||||
if len(sa.Status) == 0 {
|
|
||||||
sa.Status = SmartAttributeStatusPassed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// compare the attribute (raw, normalized, transformed) value to observed thresholds, and update status if necessary
func (sa *SmartAtaAttribute) MetadataObservedThresholdStatus(smartMetadata metadata.AtaAttributeMetadata) {
	//TODO: multiple rules
	// try to predict the failure rates for observed thresholds that have 0 failure rate and error bars.
	// - if the attribute is critical
	//		- the failure rate is over 10 - set to failed
	//		- the attribute does not match any threshold, set to warn
	// - if the attribute is not critical
	//		- if failure rate is above 20 - set to failed
	//		- if failure rate is above 10 but below 20 - set to warn

	//update the smart attribute status based on Observed thresholds.
	// Pick which representation of the value the observed thresholds were
	// recorded against, per the metadata's DisplayType.
	var value int64
	if smartMetadata.DisplayType == metadata.AtaSmartAttributeDisplayTypeNormalized {
		value = int64(sa.Value)
	} else if smartMetadata.DisplayType == metadata.AtaSmartAttributeDisplayTypeTransformed {
		value = sa.TransformedValue
	} else {
		value = sa.RawValue
	}

	for _, obsThresh := range smartMetadata.ObservedThresholds {

		//check if "value" is in this bucket: Low == High is an exact-match
		//bucket, otherwise the bucket is the half-open range (Low, High].
		if ((obsThresh.Low == obsThresh.High) && value == obsThresh.Low) ||
			(obsThresh.Low < value && value <= obsThresh.High) {
			sa.FailureRate = obsThresh.AnnualFailureRate

			if smartMetadata.Critical {
				if obsThresh.AnnualFailureRate >= 0.10 {
					sa.Status = SmartAttributeStatusFailed
					sa.StatusReason = "Observed Failure Rate for Critical Attribute is greater than 10%"
				}
			} else {
				if obsThresh.AnnualFailureRate >= 0.20 {
					sa.Status = SmartAttributeStatusFailed
					sa.StatusReason = "Observed Failure Rate for Attribute is greater than 20%"
				} else if obsThresh.AnnualFailureRate >= 0.10 {
					sa.Status = SmartAttributeStatusWarning
					sa.StatusReason = "Observed Failure Rate for Attribute is greater than 10%"
				}
			}

			//we've found the correct bucket, we can drop out of this loop
			return
		}
	}
	// no bucket found: for critical attributes an unknown failure rate is
	// itself worth a warning.
	if smartMetadata.Critical {
		sa.Status = SmartAttributeStatusWarning
		sa.StatusReason = "Could not determine Observed Failure Rate for Critical Attribute"
	}

	return
}
|
|
@ -1,46 +0,0 @@
|
|||||||
package db
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/metadata"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SmartNvmeAttribute is one NVMe health-log metric belonging to a Smart
// snapshot. Fields tagged gorm:"-" are computed at read time and never
// persisted.
type SmartNvmeAttribute struct {
	gorm.Model

	SmartId int    `json:"smart_id"`
	// NOTE(review): the field type here is Device although the foreign key is
	// SmartId — looks like it should reference the Smart model; confirm.
	Smart   Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key

	AttributeId string `json:"attribute_id"` //json string from smartctl
	Name        string `json:"name"`
	Value       int    `json:"value"`
	Threshold   int    `json:"thresh"` // -1 means no threshold

	// Scrutiny-computed fields (not persisted).
	TransformedValue int64                `json:"transformed_value"`
	Status           string               `gorm:"-" json:"status,omitempty"`
	StatusReason     string               `gorm:"-" json:"status_reason,omitempty"`
	FailureRate      float64              `gorm:"-" json:"failure_rate,omitempty"`
	History          []SmartNvmeAttribute `gorm:"-" json:"history,omitempty"`
}
|
|
||||||
|
|
||||||
//populate attribute status, using SMART Thresholds & Observed Metadata
|
|
||||||
func (sa *SmartNvmeAttribute) PopulateAttributeStatus() {
|
|
||||||
|
|
||||||
//-1 is a special number meaning no threshold.
|
|
||||||
if sa.Threshold != -1 {
|
|
||||||
if smartMetadata, ok := metadata.NmveMetadata[sa.AttributeId]; ok {
|
|
||||||
//check what the ideal is. Ideal tells us if we our recorded value needs to be above, or below the threshold
|
|
||||||
if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
|
|
||||||
(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
|
|
||||||
sa.Status = SmartAttributeStatusFailed
|
|
||||||
sa.StatusReason = "Attribute is failing recommended SMART threshold"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
//TODO: eventually figure out the critical_warning bits and determine correct error messages here.
|
|
||||||
|
|
||||||
//check if status is blank, set to "passed"
|
|
||||||
if len(sa.Status) == 0 {
|
|
||||||
sa.Status = SmartAttributeStatusPassed
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,45 +0,0 @@
|
|||||||
package db
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/metadata"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SmartScsiAttribute is one SCSI error-counter metric belonging to a Smart
// snapshot. Fields tagged gorm:"-" are computed at read time and never
// persisted.
type SmartScsiAttribute struct {
	gorm.Model

	SmartId int    `json:"smart_id"`
	// NOTE(review): the field type here is Device although the foreign key is
	// SmartId — looks like it should reference the Smart model; confirm.
	Smart   Device `json:"-" gorm:"foreignkey:SmartId"` // use SmartId as foreign key

	AttributeId string `json:"attribute_id"` //json string from smartctl
	Name        string `json:"name"`
	Value       int    `json:"value"`
	Threshold   int    `json:"thresh"` // -1 means no threshold

	// Scrutiny-computed fields (not persisted).
	TransformedValue int64                `json:"transformed_value"`
	Status           string               `gorm:"-" json:"status,omitempty"`
	StatusReason     string               `gorm:"-" json:"status_reason,omitempty"`
	FailureRate      float64              `gorm:"-" json:"failure_rate,omitempty"`
	History          []SmartScsiAttribute `gorm:"-" json:"history,omitempty"`
}
|
|
||||||
|
|
||||||
//populate attribute status, using SMART Thresholds & Observed Metadata
func (sa *SmartScsiAttribute) PopulateAttributeStatus() {

	//-1 is a special number meaning no threshold.
	if sa.Threshold != -1 {
		// NOTE(review): this looks up the NVMe metadata table (NmveMetadata)
		// for SCSI attribute ids — likely a copy-paste from the NVMe variant;
		// confirm whether a SCSI metadata table should be used instead.
		if smartMetadata, ok := metadata.NmveMetadata[sa.AttributeId]; ok {
			//check what the ideal is. Ideal tells us if we our recorded value needs to be above, or below the threshold
			if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
				(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
				sa.Status = SmartAttributeStatusFailed
				sa.StatusReason = "Attribute is failing recommended SMART threshold"
			}
		}
	}

	//check if status is blank, set to "passed"
	if len(sa.Status) == 0 {
		sa.Status = SmartAttributeStatusPassed
	}
}
|
|
@ -1,155 +0,0 @@
|
|||||||
package db_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/db"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFromCollectorSmartInfo(t *testing.T) {
|
|
||||||
//setup
|
|
||||||
smartDataFile, err := os.Open("../testdata/smart-ata.json")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer smartDataFile.Close()
|
|
||||||
|
|
||||||
var smartJson collector.SmartInfo
|
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
//test
|
|
||||||
smartMdl := db.Smart{}
|
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
|
||||||
|
|
||||||
//assert
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
|
||||||
require.Equal(t, "passed", smartMdl.SmartStatus)
|
|
||||||
require.Equal(t, 18, len(smartMdl.AtaAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.NvmeAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.ScsiAttributes))
|
|
||||||
|
|
||||||
//check that temperature was correctly parsed
|
|
||||||
for _, attr := range smartMdl.AtaAttributes {
|
|
||||||
if attr.AttributeId == 194 {
|
|
||||||
require.Equal(t, int64(163210330144), attr.RawValue)
|
|
||||||
require.Equal(t, int64(32), attr.TransformedValue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFromCollectorSmartInfo_Fail(t *testing.T) {
|
|
||||||
//setup
|
|
||||||
smartDataFile, err := os.Open("../testdata/smart-fail.json")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer smartDataFile.Close()
|
|
||||||
|
|
||||||
var smartJson collector.SmartInfo
|
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
//test
|
|
||||||
smartMdl := db.Smart{}
|
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
|
||||||
|
|
||||||
//assert
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
|
||||||
require.Equal(t, "failed", smartMdl.SmartStatus)
|
|
||||||
require.Equal(t, 0, len(smartMdl.AtaAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.NvmeAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.ScsiAttributes))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFromCollectorSmartInfo_Fail2(t *testing.T) {
|
|
||||||
//setup
|
|
||||||
smartDataFile, err := os.Open("../testdata/smart-fail2.json")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer smartDataFile.Close()
|
|
||||||
|
|
||||||
var smartJson collector.SmartInfo
|
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
//test
|
|
||||||
smartMdl := db.Smart{}
|
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
|
||||||
|
|
||||||
//assert
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
|
||||||
require.Equal(t, "failed", smartMdl.SmartStatus)
|
|
||||||
require.Equal(t, 17, len(smartMdl.AtaAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.NvmeAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.ScsiAttributes))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFromCollectorSmartInfo_Nvme(t *testing.T) {
|
|
||||||
//setup
|
|
||||||
smartDataFile, err := os.Open("../testdata/smart-nvme.json")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer smartDataFile.Close()
|
|
||||||
|
|
||||||
var smartJson collector.SmartInfo
|
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
//test
|
|
||||||
smartMdl := db.Smart{}
|
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
|
||||||
|
|
||||||
//assert
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
|
||||||
require.Equal(t, "passed", smartMdl.SmartStatus)
|
|
||||||
require.Equal(t, 0, len(smartMdl.AtaAttributes))
|
|
||||||
require.Equal(t, 16, len(smartMdl.NvmeAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.ScsiAttributes))
|
|
||||||
|
|
||||||
require.Equal(t, 111303174, smartMdl.NvmeAttributes[6].Value)
|
|
||||||
require.Equal(t, 83170961, smartMdl.NvmeAttributes[7].Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFromCollectorSmartInfo_Scsi(t *testing.T) {
|
|
||||||
//setup
|
|
||||||
smartDataFile, err := os.Open("../testdata/smart-scsi.json")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer smartDataFile.Close()
|
|
||||||
|
|
||||||
var smartJson collector.SmartInfo
|
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
//test
|
|
||||||
smartMdl := db.Smart{}
|
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
|
||||||
|
|
||||||
//assert
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
|
||||||
require.Equal(t, "passed", smartMdl.SmartStatus)
|
|
||||||
require.Equal(t, 0, len(smartMdl.AtaAttributes))
|
|
||||||
require.Equal(t, 0, len(smartMdl.NvmeAttributes))
|
|
||||||
require.Equal(t, 13, len(smartMdl.ScsiAttributes))
|
|
||||||
|
|
||||||
require.Equal(t, 56, smartMdl.ScsiAttributes[0].Value)
|
|
||||||
require.Equal(t, 300357663, smartMdl.ScsiAttributes[4].Value) //total_errors_corrected
|
|
||||||
}
|
|
@ -0,0 +1,169 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeviceWrapper is the standard API response envelope for device payloads.
type DeviceWrapper struct {
	Success bool     `json:"success"` // true when the request succeeded
	Errors  []error  `json:"errors"`  // populated when Success is false
	Data    []Device `json:"data"`    // the device payload
}
|
||||||
|
|
||||||
|
// Device is the persisted representation of a storage device monitored by
// Scrutiny, combining collector-reported hardware details, user-supplied
// metadata, and Scrutiny-derived status.
type Device struct {
	//GORM attributes, see: http://gorm.io/docs/conventions.html
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time

	// WWN (World Wide Name) uniquely identifies the device and is the
	// primary key.
	WWN string `json:"wwn" gorm:"primary_key"`

	// Hardware details reported by the collector.
	DeviceName     string `json:"device_name"`
	Manufacturer   string `json:"manufacturer"`
	ModelName      string `json:"model_name"`
	InterfaceType  string `json:"interface_type"`
	InterfaceSpeed string `json:"interface_speed"`
	SerialNumber   string `json:"serial_number"`
	Firmware       string `json:"firmware"`
	RotationSpeed  int    `json:"rotational_speed"`
	Capacity       int64  `json:"capacity"`
	FormFactor     string `json:"form_factor"`
	SmartSupport   bool   `json:"smart_support"`
	DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
	DeviceType     string `json:"device_type"`     //device type is used for querying with -d/t flag, should only be used by collector.

	// User provided metadata
	Label  string `json:"label"`
	HostId string `json:"host_id"`

	// Data set by Scrutiny
	DeviceStatus pkg.DeviceStatus `json:"device_status"`
}
|
||||||
|
|
||||||
|
// IsAta reports whether the device uses the ATA protocol.
func (dv *Device) IsAta() bool {
	return dv.DeviceProtocol == pkg.DeviceProtocolAta
}
|
||||||
|
|
||||||
|
// IsScsi reports whether the device uses the SCSI protocol.
func (dv *Device) IsScsi() bool {
	return dv.DeviceProtocol == pkg.DeviceProtocolScsi
}
|
||||||
|
|
||||||
|
// IsNvme reports whether the device uses the NVMe protocol.
func (dv *Device) IsNvme() bool {
	return dv.DeviceProtocol == pkg.DeviceProtocolNvme
}
|
||||||
|
|
||||||
|
//
|
||||||
|
////This method requires a device with an array of SmartResults.
|
||||||
|
////It will remove all SmartResults other than the first (the latest one)
|
||||||
|
////All removed SmartResults, will be processed, grouping SmartAtaAttribute by attribute_id
|
||||||
|
//// and adding theme to an array called History.
|
||||||
|
//func (dv *Device) SquashHistory() error {
|
||||||
|
// if len(dv.SmartResults) <= 1 {
|
||||||
|
// return nil //no ataHistory found. ignore
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// latestSmartResultSlice := dv.SmartResults[0:1]
|
||||||
|
// historicalSmartResultSlice := dv.SmartResults[1:]
|
||||||
|
//
|
||||||
|
// //re-assign the latest slice to the SmartResults field
|
||||||
|
// dv.SmartResults = latestSmartResultSlice
|
||||||
|
//
|
||||||
|
// //process the historical slice for ATA data
|
||||||
|
// if len(dv.SmartResults[0].AtaAttributes) > 0 {
|
||||||
|
// ataHistory := map[int][]SmartAtaAttribute{}
|
||||||
|
// for _, smartResult := range historicalSmartResultSlice {
|
||||||
|
// for _, smartAttribute := range smartResult.AtaAttributes {
|
||||||
|
// if _, ok := ataHistory[smartAttribute.AttributeId]; !ok {
|
||||||
|
// ataHistory[smartAttribute.AttributeId] = []SmartAtaAttribute{}
|
||||||
|
// }
|
||||||
|
// ataHistory[smartAttribute.AttributeId] = append(ataHistory[smartAttribute.AttributeId], smartAttribute)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// //now assign the historical slices to the AtaAttributes in the latest SmartResults
|
||||||
|
// for sandx, smartAttribute := range dv.SmartResults[0].AtaAttributes {
|
||||||
|
// if attributeHistory, ok := ataHistory[smartAttribute.AttributeId]; ok {
|
||||||
|
// dv.SmartResults[0].AtaAttributes[sandx].History = attributeHistory
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// //process the historical slice for Nvme data
|
||||||
|
// if len(dv.SmartResults[0].NvmeAttributes) > 0 {
|
||||||
|
// nvmeHistory := map[string][]SmartNvmeAttribute{}
|
||||||
|
// for _, smartResult := range historicalSmartResultSlice {
|
||||||
|
// for _, smartAttribute := range smartResult.NvmeAttributes {
|
||||||
|
// if _, ok := nvmeHistory[smartAttribute.AttributeId]; !ok {
|
||||||
|
// nvmeHistory[smartAttribute.AttributeId] = []SmartNvmeAttribute{}
|
||||||
|
// }
|
||||||
|
// nvmeHistory[smartAttribute.AttributeId] = append(nvmeHistory[smartAttribute.AttributeId], smartAttribute)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// //now assign the historical slices to the AtaAttributes in the latest SmartResults
|
||||||
|
// for sandx, smartAttribute := range dv.SmartResults[0].NvmeAttributes {
|
||||||
|
// if attributeHistory, ok := nvmeHistory[smartAttribute.AttributeId]; ok {
|
||||||
|
// dv.SmartResults[0].NvmeAttributes[sandx].History = attributeHistory
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// //process the historical slice for Scsi data
|
||||||
|
// if len(dv.SmartResults[0].ScsiAttributes) > 0 {
|
||||||
|
// scsiHistory := map[string][]SmartScsiAttribute{}
|
||||||
|
// for _, smartResult := range historicalSmartResultSlice {
|
||||||
|
// for _, smartAttribute := range smartResult.ScsiAttributes {
|
||||||
|
// if _, ok := scsiHistory[smartAttribute.AttributeId]; !ok {
|
||||||
|
// scsiHistory[smartAttribute.AttributeId] = []SmartScsiAttribute{}
|
||||||
|
// }
|
||||||
|
// scsiHistory[smartAttribute.AttributeId] = append(scsiHistory[smartAttribute.AttributeId], smartAttribute)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// //now assign the historical slices to the AtaAttributes in the latest SmartResults
|
||||||
|
// for sandx, smartAttribute := range dv.SmartResults[0].ScsiAttributes {
|
||||||
|
// if attributeHistory, ok := scsiHistory[smartAttribute.AttributeId]; ok {
|
||||||
|
// dv.SmartResults[0].ScsiAttributes[sandx].History = attributeHistory
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// return nil
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//func (dv *Device) ApplyMetadataRules() error {
|
||||||
|
//
|
||||||
|
// //embed metadata in the latest smart attributes object
|
||||||
|
// if len(dv.SmartResults) > 0 {
|
||||||
|
// for ndx, attr := range dv.SmartResults[0].AtaAttributes {
|
||||||
|
// attr.PopulateAttributeStatus()
|
||||||
|
// dv.SmartResults[0].AtaAttributes[ndx] = attr
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// for ndx, attr := range dv.SmartResults[0].NvmeAttributes {
|
||||||
|
// attr.PopulateAttributeStatus()
|
||||||
|
// dv.SmartResults[0].NvmeAttributes[ndx] = attr
|
||||||
|
//
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// for ndx, attr := range dv.SmartResults[0].ScsiAttributes {
|
||||||
|
// attr.PopulateAttributeStatus()
|
||||||
|
// dv.SmartResults[0].ScsiAttributes[ndx] = attr
|
||||||
|
//
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// return nil
|
||||||
|
//}
|
||||||
|
|
||||||
|
// This function is called every time the collector sends SMART data to the API.
// It can be used to update device data that can change over time.
func (dv *Device) UpdateFromCollectorSmartInfo(info collector.SmartInfo) error {
	// firmware and protocol can change between collector runs (e.g. after a
	// firmware update), so refresh them on every submission.
	dv.Firmware = info.FirmwareVersion
	dv.DeviceProtocol = info.Device.Protocol

	// if the drive itself reports a SMART failure, OR the failed-SMART flag
	// into the device status bitmask (pkg.Set sets the bit without clearing
	// any other flags).
	if !info.SmartStatus.Passed {
		dv.DeviceStatus = pkg.Set(dv.DeviceStatus, pkg.DeviceStatusFailedSmart)
	}

	return nil
}
|
@ -0,0 +1,27 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeviceSummaryWrapper is the JSON envelope returned by the device summary
// API endpoint: a success flag, any accumulated errors, and the per-device
// summary map.
type DeviceSummaryWrapper struct {
	Success bool    `json:"success"`
	Errors  []error `json:"errors"`
	Data    struct {
		// Summary is keyed by a device identifier string (presumably the
		// device WWN — confirm against the endpoint that populates it).
		Summary map[string]*DeviceSummary `json:"summary"`
	} `json:"data"`
}

// DeviceSummary pairs a device with its most recent SMART summary and its
// temperature history. SmartResults and TempHistory are omitted from the
// JSON output when empty.
type DeviceSummary struct {
	Device Device `json:"device"`

	SmartResults *SmartSummary                   `json:"smart,omitempty"`
	TempHistory  []measurements.SmartTemperature `json:"temp_history,omitempty"`
}

// SmartSummary holds the headline metrics from the latest collector run.
type SmartSummary struct {
	// Collector Summary Data
	CollectorDate time.Time `json:"collector_date,omitempty"`
	Temp          int64     `json:"temp,omitempty"`
	PowerOnHours  int64     `json:"power_on_hours,omitempty"`
}
|
@ -0,0 +1,211 @@
|
|||||||
|
package measurements
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Smart is a single SMART measurement for one device at one point in time.
// When written to InfluxDB, DeviceWWN and DeviceProtocol become tags and the
// metrics/attributes become fields (see Flatten).
type Smart struct {
	Date           time.Time `json:"date"`
	DeviceWWN      string    `json:"device_wwn"` //(tag)
	DeviceProtocol string    `json:"device_protocol"`

	//Metrics (fields)
	Temp            int64 `json:"temp"`
	PowerOnHours    int64 `json:"power_on_hours"`
	PowerCycleCount int64 `json:"power_cycle_count"`

	//Attributes (fields), keyed by attribute id — a numeric string for ATA,
	//an attribute name for NVMe/SCSI (see the Process*SmartInfo methods).
	Attributes map[string]SmartAttribute `json:"attrs"`

	//status — presumably a flag bitmask, since it is only ever written via
	//pkg.Set or compared against pkg.DeviceStatus* constants; confirm.
	Status pkg.DeviceStatus
}

// Flatten converts the measurement into InfluxDB tag and field maps.
// Each attribute contributes its own "attr.<id>.*" fields via its Flatten().
func (sm *Smart) Flatten() (tags map[string]string, fields map[string]interface{}) {
	tags = map[string]string{
		"device_wwn":      sm.DeviceWWN,
		"device_protocol": sm.DeviceProtocol,
	}

	fields = map[string]interface{}{
		"temp":              sm.Temp,
		"power_on_hours":    sm.PowerOnHours,
		"power_cycle_count": sm.PowerCycleCount,
	}

	// merge the per-attribute field maps into the top-level field map.
	for _, attr := range sm.Attributes {
		for attrKey, attrVal := range attr.Flatten() {
			fields[attrKey] = attrVal
		}
	}

	return tags, fields
}
|
||||||
|
|
||||||
|
// NewSmartFromInfluxDB rebuilds a Smart measurement from the flat key/value
// map returned by an InfluxDB query (the inverse of Smart.Flatten). Known
// metric keys are assigned directly; keys of the form "attr.<id>.<field>"
// are grouped by attribute id and handed to the protocol-specific
// SmartAttribute.Inflate implementation.
func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
	//go though the massive map returned from influxdb. If a key is associated with the Smart struct, assign it. If it starts with "attr.*" group it by attributeId, and pass to attribute inflate.

	sm := Smart{
		//required fields
		// NOTE(review): these type assertions panic if a key is missing or
		// holds an unexpected type — assumes influx always returns
		// _time/device_wwn/device_protocol with these types; confirm.
		Date:           attrs["_time"].(time.Time),
		DeviceWWN:      attrs["device_wwn"].(string),
		DeviceProtocol: attrs["device_protocol"].(string),

		Attributes: map[string]SmartAttribute{},
	}

	for key, val := range attrs {
		switch key {
		case "temp":
			sm.Temp = val.(int64)
		case "power_on_hours":
			sm.PowerOnHours = val.(int64)
		case "power_cycle_count":
			sm.PowerCycleCount = val.(int64)
		default:
			// this key is unknown.
			if !strings.HasPrefix(key, "attr.") {
				continue
			}
			//this is a attribute, lets group it with its related "siblings", populating a SmartAttribute object
			// key format is "attr.<attributeId>.<field>".
			keyParts := strings.Split(key, ".")
			attributeId := keyParts[1]
			if _, ok := sm.Attributes[attributeId]; !ok {
				// init the attribute group with the concrete type matching
				// the device protocol.
				if sm.DeviceProtocol == pkg.DeviceProtocolAta {
					sm.Attributes[attributeId] = &SmartAtaAttribute{}
				} else if sm.DeviceProtocol == pkg.DeviceProtocolNvme {
					sm.Attributes[attributeId] = &SmartNvmeAttribute{}
				} else if sm.DeviceProtocol == pkg.DeviceProtocolScsi {
					sm.Attributes[attributeId] = &SmartScsiAttribute{}
				} else {
					return nil, fmt.Errorf("Unknown Device Protocol: %s", sm.DeviceProtocol)
				}
			}

			// fold this single field into the grouped attribute.
			sm.Attributes[attributeId].Inflate(key, val)
		}

	}

	log.Printf("Found Smart Device (%s) Attributes (%v)", sm.DeviceWWN, len(sm.Attributes))

	return &sm, nil
}
|
||||||
|
|
||||||
|
//Parse Collector SMART data results and create Smart object (and associated SmartAtaAttribute entries)
|
||||||
|
func (sm *Smart) FromCollectorSmartInfo(wwn string, info collector.SmartInfo) error {
|
||||||
|
sm.DeviceWWN = wwn
|
||||||
|
sm.Date = time.Unix(info.LocalTime.TimeT, 0)
|
||||||
|
|
||||||
|
//smart metrics
|
||||||
|
sm.Temp = info.Temperature.Current
|
||||||
|
sm.PowerCycleCount = info.PowerCycleCount
|
||||||
|
sm.PowerOnHours = info.PowerOnTime.Hours
|
||||||
|
if !info.SmartStatus.Passed {
|
||||||
|
sm.Status = pkg.DeviceStatusFailedSmart
|
||||||
|
}
|
||||||
|
|
||||||
|
sm.DeviceProtocol = info.Device.Protocol
|
||||||
|
// process ATA/NVME/SCSI protocol data
|
||||||
|
sm.Attributes = map[string]SmartAttribute{}
|
||||||
|
if sm.DeviceProtocol == pkg.DeviceProtocolAta {
|
||||||
|
sm.ProcessAtaSmartInfo(info.AtaSmartAttributes.Table)
|
||||||
|
} else if sm.DeviceProtocol == pkg.DeviceProtocolNvme {
|
||||||
|
sm.ProcessNvmeSmartInfo(info.NvmeSmartHealthInformationLog)
|
||||||
|
} else if sm.DeviceProtocol == pkg.DeviceProtocolScsi {
|
||||||
|
sm.ProcessScsiSmartInfo(info.ScsiGrownDefectList, info.ScsiErrorCounterLog)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//generate SmartAtaAttribute entries from Scrutiny Collector Smart data.
func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTableItem) {
	for _, collectorAttr := range tableItems {
		attrModel := SmartAtaAttribute{
			AttributeId: collectorAttr.ID,
			Value:       collectorAttr.Value,
			Worst:       collectorAttr.Worst,
			Threshold:   collectorAttr.Thresh,
			RawValue:    collectorAttr.Raw.Value,
			RawString:   collectorAttr.Raw.String,
			WhenFailed:  collectorAttr.WhenFailed,
		}

		//now that we've parsed the data from the smartctl response, lets match it against our metadata rules and add additional Scrutiny specific data.
		if smartMetadata, ok := thresholds.AtaMetadata[collectorAttr.ID]; ok {
			if smartMetadata.Transform != nil {
				attrModel.TransformedValue = smartMetadata.Transform(attrModel.Value, attrModel.RawValue, attrModel.RawString)
			}
		}
		// derive pass/warn/fail status, then store keyed by the numeric id
		// rendered as a string (matches the map key format used elsewhere).
		attrModel.PopulateAttributeStatus()
		sm.Attributes[strconv.Itoa(collectorAttr.ID)] = &attrModel
		// a single failing attribute marks the whole device as failed by
		// Scrutiny's analysis.
		if attrModel.Status == pkg.SmartAttributeStatusFailed {
			sm.Status = pkg.Set(sm.Status, pkg.DeviceStatusFailedScrutiny)
		}
	}
}
|
||||||
|
|
||||||
|
//generate SmartNvmeAttribute entries from Scrutiny Collector Smart data.
func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.NvmeSmartHealthInformationLog) {

	// NVMe exposes a fixed set of health-log counters rather than a numbered
	// attribute table, so each counter maps to a named SmartNvmeAttribute.
	// A Threshold of -1 means "no threshold" (see PopulateAttributeStatus).
	sm.Attributes = map[string]SmartAttribute{
		"critical_warning":     (&SmartNvmeAttribute{AttributeId: "critical_warning", Value: nvmeSmartHealthInformationLog.CriticalWarning, Threshold: 0}).PopulateAttributeStatus(),
		"temperature":          (&SmartNvmeAttribute{AttributeId: "temperature", Value: nvmeSmartHealthInformationLog.Temperature, Threshold: -1}).PopulateAttributeStatus(),
		"available_spare":      (&SmartNvmeAttribute{AttributeId: "available_spare", Value: nvmeSmartHealthInformationLog.AvailableSpare, Threshold: nvmeSmartHealthInformationLog.AvailableSpareThreshold}).PopulateAttributeStatus(),
		"percentage_used":      (&SmartNvmeAttribute{AttributeId: "percentage_used", Value: nvmeSmartHealthInformationLog.PercentageUsed, Threshold: 100}).PopulateAttributeStatus(),
		"data_units_read":      (&SmartNvmeAttribute{AttributeId: "data_units_read", Value: nvmeSmartHealthInformationLog.DataUnitsRead, Threshold: -1}).PopulateAttributeStatus(),
		"data_units_written":   (&SmartNvmeAttribute{AttributeId: "data_units_written", Value: nvmeSmartHealthInformationLog.DataUnitsWritten, Threshold: -1}).PopulateAttributeStatus(),
		"host_reads":           (&SmartNvmeAttribute{AttributeId: "host_reads", Value: nvmeSmartHealthInformationLog.HostReads, Threshold: -1}).PopulateAttributeStatus(),
		"host_writes":          (&SmartNvmeAttribute{AttributeId: "host_writes", Value: nvmeSmartHealthInformationLog.HostWrites, Threshold: -1}).PopulateAttributeStatus(),
		"controller_busy_time": (&SmartNvmeAttribute{AttributeId: "controller_busy_time", Value: nvmeSmartHealthInformationLog.ControllerBusyTime, Threshold: -1}).PopulateAttributeStatus(),
		"power_cycles":         (&SmartNvmeAttribute{AttributeId: "power_cycles", Value: nvmeSmartHealthInformationLog.PowerCycles, Threshold: -1}).PopulateAttributeStatus(),
		"power_on_hours":       (&SmartNvmeAttribute{AttributeId: "power_on_hours", Value: nvmeSmartHealthInformationLog.PowerOnHours, Threshold: -1}).PopulateAttributeStatus(),
		"unsafe_shutdowns":     (&SmartNvmeAttribute{AttributeId: "unsafe_shutdowns", Value: nvmeSmartHealthInformationLog.UnsafeShutdowns, Threshold: -1}).PopulateAttributeStatus(),
		"media_errors":         (&SmartNvmeAttribute{AttributeId: "media_errors", Value: nvmeSmartHealthInformationLog.MediaErrors, Threshold: 0}).PopulateAttributeStatus(),
		"num_err_log_entries":  (&SmartNvmeAttribute{AttributeId: "num_err_log_entries", Value: nvmeSmartHealthInformationLog.NumErrLogEntries, Threshold: 0}).PopulateAttributeStatus(),
		"warning_temp_time":    (&SmartNvmeAttribute{AttributeId: "warning_temp_time", Value: nvmeSmartHealthInformationLog.WarningTempTime, Threshold: -1}).PopulateAttributeStatus(),
		"critical_comp_time":   (&SmartNvmeAttribute{AttributeId: "critical_comp_time", Value: nvmeSmartHealthInformationLog.CriticalCompTime, Threshold: -1}).PopulateAttributeStatus(),
	}

	//find analyzed attribute status
	for _, val := range sm.Attributes {
		if val.GetStatus() == pkg.SmartAttributeStatusFailed {
			sm.Status = pkg.Set(sm.Status, pkg.DeviceStatusFailedScrutiny)
		}
	}
}
|
||||||
|
|
||||||
|
//generate SmartScsiAttribute entries from Scrutiny Collector Smart data.
func (sm *Smart) ProcessScsiSmartInfo(defectGrownList int64, scsiErrorCounterLog collector.ScsiErrorCounterLog) {
	// SCSI exposes named error counters (per read/write direction) rather
	// than a numbered attribute table. A Threshold of -1 means "no
	// threshold" (see PopulateAttributeStatus).
	sm.Attributes = map[string]SmartAttribute{
		"scsi_grown_defect_list":                     (&SmartScsiAttribute{AttributeId: "scsi_grown_defect_list", Value: defectGrownList, Threshold: 0}).PopulateAttributeStatus(),
		"read_errors_corrected_by_eccfast":           (&SmartScsiAttribute{AttributeId: "read_errors_corrected_by_eccfast", Value: scsiErrorCounterLog.Read.ErrorsCorrectedByEccfast, Threshold: -1}).PopulateAttributeStatus(),
		"read_errors_corrected_by_eccdelayed":        (&SmartScsiAttribute{AttributeId: "read_errors_corrected_by_eccdelayed", Value: scsiErrorCounterLog.Read.ErrorsCorrectedByEccdelayed, Threshold: -1}).PopulateAttributeStatus(),
		"read_errors_corrected_by_rereads_rewrites":  (&SmartScsiAttribute{AttributeId: "read_errors_corrected_by_rereads_rewrites", Value: scsiErrorCounterLog.Read.ErrorsCorrectedByRereadsRewrites, Threshold: 0}).PopulateAttributeStatus(),
		"read_total_errors_corrected":                (&SmartScsiAttribute{AttributeId: "read_total_errors_corrected", Value: scsiErrorCounterLog.Read.TotalErrorsCorrected, Threshold: -1}).PopulateAttributeStatus(),
		"read_correction_algorithm_invocations":      (&SmartScsiAttribute{AttributeId: "read_correction_algorithm_invocations", Value: scsiErrorCounterLog.Read.CorrectionAlgorithmInvocations, Threshold: -1}).PopulateAttributeStatus(),
		"read_total_uncorrected_errors":              (&SmartScsiAttribute{AttributeId: "read_total_uncorrected_errors", Value: scsiErrorCounterLog.Read.TotalUncorrectedErrors, Threshold: 0}).PopulateAttributeStatus(),
		"write_errors_corrected_by_eccfast":          (&SmartScsiAttribute{AttributeId: "write_errors_corrected_by_eccfast", Value: scsiErrorCounterLog.Write.ErrorsCorrectedByEccfast, Threshold: -1}).PopulateAttributeStatus(),
		"write_errors_corrected_by_eccdelayed":       (&SmartScsiAttribute{AttributeId: "write_errors_corrected_by_eccdelayed", Value: scsiErrorCounterLog.Write.ErrorsCorrectedByEccdelayed, Threshold: -1}).PopulateAttributeStatus(),
		"write_errors_corrected_by_rereads_rewrites": (&SmartScsiAttribute{AttributeId: "write_errors_corrected_by_rereads_rewrites", Value: scsiErrorCounterLog.Write.ErrorsCorrectedByRereadsRewrites, Threshold: 0}).PopulateAttributeStatus(),
		"write_total_errors_corrected":               (&SmartScsiAttribute{AttributeId: "write_total_errors_corrected", Value: scsiErrorCounterLog.Write.TotalErrorsCorrected, Threshold: -1}).PopulateAttributeStatus(),
		"write_correction_algorithm_invocations":     (&SmartScsiAttribute{AttributeId: "write_correction_algorithm_invocations", Value: scsiErrorCounterLog.Write.CorrectionAlgorithmInvocations, Threshold: -1}).PopulateAttributeStatus(),
		"write_total_uncorrected_errors":             (&SmartScsiAttribute{AttributeId: "write_total_uncorrected_errors", Value: scsiErrorCounterLog.Write.TotalUncorrectedErrors, Threshold: 0}).PopulateAttributeStatus(),
	}

	//find analyzed attribute status
	for _, val := range sm.Attributes {
		if val.GetStatus() == pkg.SmartAttributeStatusFailed {
			sm.Status = pkg.Set(sm.Status, pkg.DeviceStatusFailedScrutiny)
		}
	}
}
|
@ -0,0 +1,163 @@
|
|||||||
|
package measurements
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SmartAtaAttribute is a single ATA SMART attribute (identified by its
// numeric id) plus Scrutiny's derived analysis fields.
type SmartAtaAttribute struct {
	AttributeId int    `json:"attribute_id"`
	Value       int64  `json:"value"`
	Threshold   int64  `json:"thresh"`
	Worst       int64  `json:"worst"`
	RawValue    int64  `json:"raw_value"`
	RawString   string `json:"raw_string"`
	WhenFailed  string `json:"when_failed"`

	//Generated data
	TransformedValue int64   `json:"transformed_value"`
	Status           int64   `json:"status"`
	StatusReason     string  `json:"status_reason,omitempty"`
	FailureRate      float64 `json:"failure_rate,omitempty"`
}

// GetStatus returns the analyzed attribute status (SmartAttribute interface).
func (sa *SmartAtaAttribute) GetStatus() int64 {
	return sa.Status
}

// Flatten returns the attribute as "attr.<id>.<field>" InfluxDB fields.
// Note the numeric AttributeId is rendered and stored as a string.
func (sa *SmartAtaAttribute) Flatten() map[string]interface{} {

	idString := strconv.Itoa(sa.AttributeId)

	return map[string]interface{}{
		fmt.Sprintf("attr.%s.attribute_id", idString): idString,
		fmt.Sprintf("attr.%s.value", idString):        sa.Value,
		fmt.Sprintf("attr.%s.worst", idString):        sa.Worst,
		fmt.Sprintf("attr.%s.thresh", idString):       sa.Threshold,
		fmt.Sprintf("attr.%s.raw_value", idString):    sa.RawValue,
		fmt.Sprintf("attr.%s.raw_string", idString):   sa.RawString,
		fmt.Sprintf("attr.%s.when_failed", idString):  sa.WhenFailed,

		//Generated Data
		fmt.Sprintf("attr.%s.transformed_value", idString): sa.TransformedValue,
		fmt.Sprintf("attr.%s.status", idString):            sa.Status,
		fmt.Sprintf("attr.%s.status_reason", idString):     sa.StatusReason,
		fmt.Sprintf("attr.%s.failure_rate", idString):      sa.FailureRate,
	}
}

// Inflate applies one "attr.<id>.<field>" key/value pair read back from
// InfluxDB. Nil values and unrecognized field names are ignored.
func (sa *SmartAtaAttribute) Inflate(key string, val interface{}) {
	if val == nil {
		return
	}
	keyParts := strings.Split(key, ".")

	// keyParts[2] is the field name within "attr.<id>.<field>".
	switch keyParts[2] {
	case "attribute_id":
		// stored as a string in influx (see Flatten); a parse failure
		// silently leaves AttributeId at its current value.
		attrId, err := strconv.Atoi(val.(string))
		if err == nil {
			sa.AttributeId = attrId
		}
	case "value":
		sa.Value = val.(int64)
	case "worst":
		sa.Worst = val.(int64)
	case "thresh":
		sa.Threshold = val.(int64)
	case "raw_value":
		sa.RawValue = val.(int64)
	case "raw_string":
		sa.RawString = val.(string)
	case "when_failed":
		sa.WhenFailed = val.(string)

	//generated
	case "transformed_value":
		sa.TransformedValue = val.(int64)
	case "status":
		sa.Status = val.(int64)
	case "status_reason":
		sa.StatusReason = val.(string)
	case "failure_rate":
		sa.FailureRate = val.(float64)

	}
}
|
||||||
|
|
||||||
|
//populate attribute status, using SMART Thresholds & Observed Metadata
// Chainable
func (sa *SmartAtaAttribute) PopulateAttributeStatus() *SmartAtaAttribute {
	if strings.ToUpper(sa.WhenFailed) == pkg.SmartWhenFailedFailingNow {
		//this attribute is currently failing the manufacturer threshold
		sa.Status = pkg.SmartAttributeStatusFailed
		sa.StatusReason = "Attribute is failing manufacturer SMART threshold"

	} else if strings.ToUpper(sa.WhenFailed) == pkg.SmartWhenFailedInThePast {
		//this attribute failed at some point in the past; treat as a warning
		sa.Status = pkg.SmartAttributeStatusWarning
		sa.StatusReason = "Attribute has previously failed manufacturer SMART threshold"
	}

	//overlay Scrutiny's observed-failure-rate analysis when metadata exists
	//for this attribute id (may upgrade the status set above).
	if smartMetadata, ok := thresholds.AtaMetadata[sa.AttributeId]; ok {
		sa.ValidateThreshold(smartMetadata)
	}

	return sa
}
|
||||||
|
|
||||||
|
// compare the attribute (raw, normalized, transformed) value to observed thresholds, and update status if necessary
|
||||||
|
func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttributeMetadata) {
|
||||||
|
//TODO: multiple rules
|
||||||
|
// try to predict the failure rates for observed thresholds that have 0 failure rate and error bars.
|
||||||
|
// - if the attribute is critical
|
||||||
|
// - the failure rate is over 10 - set to failed
|
||||||
|
// - the attribute does not match any threshold, set to warn
|
||||||
|
// - if the attribute is not critical
|
||||||
|
// - if failure rate is above 20 - set to failed
|
||||||
|
// - if failure rate is above 10 but below 20 - set to warn
|
||||||
|
|
||||||
|
//update the smart attribute status based on Observed thresholds.
|
||||||
|
var value int64
|
||||||
|
if smartMetadata.DisplayType == thresholds.AtaSmartAttributeDisplayTypeNormalized {
|
||||||
|
value = int64(sa.Value)
|
||||||
|
} else if smartMetadata.DisplayType == thresholds.AtaSmartAttributeDisplayTypeTransformed {
|
||||||
|
value = sa.TransformedValue
|
||||||
|
} else {
|
||||||
|
value = sa.RawValue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, obsThresh := range smartMetadata.ObservedThresholds {
|
||||||
|
|
||||||
|
//check if "value" is in this bucket
|
||||||
|
if ((obsThresh.Low == obsThresh.High) && value == obsThresh.Low) ||
|
||||||
|
(obsThresh.Low < value && value <= obsThresh.High) {
|
||||||
|
sa.FailureRate = obsThresh.AnnualFailureRate
|
||||||
|
|
||||||
|
if smartMetadata.Critical {
|
||||||
|
if obsThresh.AnnualFailureRate >= 0.10 {
|
||||||
|
sa.Status = pkg.SmartAttributeStatusFailed
|
||||||
|
sa.StatusReason = "Observed Failure Rate for Critical Attribute is greater than 10%"
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if obsThresh.AnnualFailureRate >= 0.20 {
|
||||||
|
sa.Status = pkg.SmartAttributeStatusFailed
|
||||||
|
sa.StatusReason = "Observed Failure Rate for Attribute is greater than 20%"
|
||||||
|
} else if obsThresh.AnnualFailureRate >= 0.10 {
|
||||||
|
sa.Status = pkg.SmartAttributeStatusWarning
|
||||||
|
sa.StatusReason = "Observed Failure Rate for Attribute is greater than 10%"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//we've found the correct bucket, we can drop out of this loop
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// no bucket found
|
||||||
|
if smartMetadata.Critical {
|
||||||
|
sa.Status = pkg.SmartAttributeStatusWarning
|
||||||
|
sa.StatusReason = "Could not determine Observed Failure Rate for Critical Attribute"
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
@ -0,0 +1,7 @@
|
|||||||
|
package measurements
|
||||||
|
|
||||||
|
// SmartAttribute is the protocol-independent view of a single SMART
// attribute; SmartAtaAttribute, SmartNvmeAttribute and SmartScsiAttribute
// implement it.
type SmartAttribute interface {
	// Flatten returns the attribute as "attr.<id>.<field>" InfluxDB fields.
	Flatten() (fields map[string]interface{})
	// Inflate applies one "attr.<id>.<field>" key/value read back from InfluxDB.
	Inflate(key string, val interface{})
	// GetStatus returns the analyzed attribute status.
	GetStatus() int64
}
|
@ -0,0 +1,83 @@
|
|||||||
|
package measurements
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SmartNvmeAttribute is a single NVMe health-log counter with Scrutiny's
// derived analysis fields.
type SmartNvmeAttribute struct {
	AttributeId string `json:"attribute_id"` //json string from smartctl
	Value       int64  `json:"value"`
	Threshold   int64  `json:"thresh"` // -1 means no threshold is defined (see PopulateAttributeStatus)

	//Generated data
	TransformedValue int64   `json:"transformed_value"`
	Status           int64   `json:"status"`
	StatusReason     string  `json:"status_reason,omitempty"`
	FailureRate      float64 `json:"failure_rate,omitempty"`
}

// GetStatus returns the analyzed attribute status (SmartAttribute interface).
func (sa *SmartNvmeAttribute) GetStatus() int64 {
	return sa.Status
}

// Flatten returns the attribute as "attr.<id>.<field>" InfluxDB fields.
func (sa *SmartNvmeAttribute) Flatten() map[string]interface{} {
	return map[string]interface{}{
		fmt.Sprintf("attr.%s.attribute_id", sa.AttributeId): sa.AttributeId,
		fmt.Sprintf("attr.%s.value", sa.AttributeId):        sa.Value,
		fmt.Sprintf("attr.%s.thresh", sa.AttributeId):       sa.Threshold,

		//Generated Data
		fmt.Sprintf("attr.%s.transformed_value", sa.AttributeId): sa.TransformedValue,
		fmt.Sprintf("attr.%s.status", sa.AttributeId):            sa.Status,
		fmt.Sprintf("attr.%s.status_reason", sa.AttributeId):     sa.StatusReason,
		fmt.Sprintf("attr.%s.failure_rate", sa.AttributeId):      sa.FailureRate,
	}
}

// Inflate applies one "attr.<id>.<field>" key/value pair read back from
// InfluxDB. Nil values and unrecognized field names are ignored.
func (sa *SmartNvmeAttribute) Inflate(key string, val interface{}) {
	if val == nil {
		return
	}

	keyParts := strings.Split(key, ".")

	// keyParts[2] is the field name within "attr.<id>.<field>".
	switch keyParts[2] {
	case "attribute_id":
		sa.AttributeId = val.(string)
	case "value":
		sa.Value = val.(int64)
	case "thresh":
		sa.Threshold = val.(int64)

	//generated
	case "transformed_value":
		sa.TransformedValue = val.(int64)
	case "status":
		sa.Status = val.(int64)
	case "status_reason":
		sa.StatusReason = val.(string)
	case "failure_rate":
		sa.FailureRate = val.(float64)
	}
}

//populate attribute status, using SMART Thresholds & Observed Metadata
// Chainable
func (sa *SmartNvmeAttribute) PopulateAttributeStatus() *SmartNvmeAttribute {

	//-1 is a special number meaning no threshold.
	if sa.Threshold != -1 {
		if smartMetadata, ok := thresholds.NmveMetadata[sa.AttributeId]; ok {
			//check what the ideal is. Ideal tells us if we our recorded value needs to be above, or below the threshold
			if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
				(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
				sa.Status = pkg.SmartAttributeStatusFailed
				sa.StatusReason = "Attribute is failing recommended SMART threshold"
			}
		}
	}
	//TODO: eventually figure out the critical_warning bits and determine correct error messages here.

	return sa
}
|
@ -0,0 +1,83 @@
|
|||||||
|
package measurements
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SmartScsiAttribute is a single SCSI error-counter attribute with
// Scrutiny's derived analysis fields.
type SmartScsiAttribute struct {
	AttributeId string `json:"attribute_id"` //json string from smartctl
	Value       int64  `json:"value"`
	Threshold   int64  `json:"thresh"` // -1 means no threshold is defined (see PopulateAttributeStatus)

	//Generated data
	TransformedValue int64   `json:"transformed_value"`
	Status           int64   `json:"status"`
	StatusReason     string  `json:"status_reason,omitempty"`
	FailureRate      float64 `json:"failure_rate,omitempty"`
}

// GetStatus returns the analyzed attribute status (SmartAttribute interface).
func (sa *SmartScsiAttribute) GetStatus() int64 {
	return sa.Status
}

// Flatten returns the attribute as "attr.<id>.<field>" InfluxDB fields.
func (sa *SmartScsiAttribute) Flatten() map[string]interface{} {
	return map[string]interface{}{
		fmt.Sprintf("attr.%s.attribute_id", sa.AttributeId): sa.AttributeId,
		fmt.Sprintf("attr.%s.value", sa.AttributeId):        sa.Value,
		fmt.Sprintf("attr.%s.thresh", sa.AttributeId):       sa.Threshold,

		//Generated Data
		fmt.Sprintf("attr.%s.transformed_value", sa.AttributeId): sa.TransformedValue,
		fmt.Sprintf("attr.%s.status", sa.AttributeId):            sa.Status,
		fmt.Sprintf("attr.%s.status_reason", sa.AttributeId):     sa.StatusReason,
		fmt.Sprintf("attr.%s.failure_rate", sa.AttributeId):      sa.FailureRate,
	}
}

// Inflate applies one "attr.<id>.<field>" key/value pair read back from
// InfluxDB. Nil values and unrecognized field names are ignored.
func (sa *SmartScsiAttribute) Inflate(key string, val interface{}) {
	if val == nil {
		return
	}

	keyParts := strings.Split(key, ".")

	// keyParts[2] is the field name within "attr.<id>.<field>".
	switch keyParts[2] {
	case "attribute_id":
		sa.AttributeId = val.(string)
	case "value":
		sa.Value = val.(int64)
	case "thresh":
		sa.Threshold = val.(int64)

	//generated
	case "transformed_value":
		sa.TransformedValue = val.(int64)
	case "status":
		sa.Status = val.(int64)
	case "status_reason":
		sa.StatusReason = val.(string)
	case "failure_rate":
		sa.FailureRate = val.(float64)
	}
}

//
//populate attribute status, using SMART Thresholds & Observed Metadata
//Chainable
func (sa *SmartScsiAttribute) PopulateAttributeStatus() *SmartScsiAttribute {

	//-1 is a special number meaning no threshold.
	if sa.Threshold != -1 {
		// NOTE(review): this looks up thresholds.NmveMetadata (the NVMe
		// metadata table) for a SCSI attribute — likely copy/paste from
		// SmartNvmeAttribute.PopulateAttributeStatus; confirm whether a
		// SCSI-specific metadata table should be used instead.
		if smartMetadata, ok := thresholds.NmveMetadata[sa.AttributeId]; ok {
			//check what the ideal is. Ideal tells us if we our recorded value needs to be above, or below the threshold
			if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
				(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
				sa.Status = pkg.SmartAttributeStatusFailed
				sa.StatusReason = "Attribute is failing recommended SMART threshold"
			}
		}
	}

	return sa
}
|
@ -0,0 +1,34 @@
|
|||||||
|
package measurements
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SmartTemperature is a single temperature reading for a device at a point
// in time.
type SmartTemperature struct {
	Date time.Time `json:"date"`
	Temp int64     `json:"temp"`
}

// Flatten converts the reading into InfluxDB tag and field maps: there are
// no tags, and the only field is the temperature itself.
func (st *SmartTemperature) Flatten() (tags map[string]string, fields map[string]interface{}) {
	return map[string]string{}, map[string]interface{}{"temp": st.Temp}
}

// Inflate applies a single key/value pair read back from InfluxDB, accepting
// the temperature as either an int64 or a float64 (truncated). Nil values
// and keys other than "temp" are ignored.
func (st *SmartTemperature) Inflate(key string, val interface{}) {
	if val == nil || key != "temp" {
		return
	}
	switch typed := val.(type) {
	case int64:
		st.Temp = typed
	case float64:
		st.Temp = int64(typed)
	}
}
|
@ -0,0 +1,436 @@
|
|||||||
|
package measurements_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSmart_Flatten verifies that a Smart measurement with no attributes
// flattens into exactly the expected tag and field maps.
func TestSmart_Flatten(t *testing.T) {
	//setup
	timeNow := time.Now()
	smart := measurements.Smart{
		Date:            timeNow,
		DeviceWWN:       "test-wwn",
		DeviceProtocol:  pkg.DeviceProtocolAta,
		Temp:            50,
		PowerOnHours:    10,
		PowerCycleCount: 10,
		Attributes:      nil,
		Status:          0,
	}

	//test
	tags, fields := smart.Flatten()

	//assert: WWN/protocol become tags; the three metrics become fields.
	require.Equal(t, map[string]string{"device_protocol": "ATA", "device_wwn": "test-wwn"}, tags)
	require.Equal(t, map[string]interface{}{"power_cycle_count": int64(10), "power_on_hours": int64(10), "temp": int64(50)}, fields)
}
|
||||||
|
|
||||||
|
// TestSmart_Flatten_ATA verifies that ATA attributes are flattened into
// "attr.<id>.<field>" keys alongside the top-level metric fields, with
// zero values for derived fields (status, failure_rate, transformed_value)
// that were not explicitly populated.
func TestSmart_Flatten_ATA(t *testing.T) {
	//setup
	timeNow := time.Now()
	smart := measurements.Smart{
		Date:            timeNow,
		DeviceWWN:       "test-wwn",
		DeviceProtocol:  pkg.DeviceProtocolAta,
		Temp:            50,
		PowerOnHours:    10,
		PowerCycleCount: 10,
		Status:          0,
		Attributes: map[string]measurements.SmartAttribute{
			"1": &measurements.SmartAtaAttribute{
				AttributeId: 1,
				Value:       100,
				Threshold:   1,
				Worst:       100,
				RawValue:    0,
				RawString:   "0",
				WhenFailed:  "",
			},
			"2": &measurements.SmartAtaAttribute{
				AttributeId: 2,
				Value:       135,
				Threshold:   54,
				Worst:       135,
				RawValue:    108,
				RawString:   "108",
				WhenFailed:  "",
			},
		},
	}

	//test
	tags, fields := smart.Flatten()

	//assert
	require.Equal(t, map[string]string{"device_protocol": "ATA", "device_wwn": "test-wwn"}, tags)
	require.Equal(t, map[string]interface{}{
		"attr.1.attribute_id":      "1",
		"attr.1.failure_rate":      float64(0),
		"attr.1.raw_string":        "0",
		"attr.1.raw_value":         int64(0),
		"attr.1.status":            int64(0),
		"attr.1.status_reason":     "",
		"attr.1.thresh":            int64(1),
		"attr.1.transformed_value": int64(0),
		"attr.1.value":             int64(100),
		"attr.1.when_failed":       "",
		"attr.1.worst":             int64(100),

		"attr.2.attribute_id":      "2",
		"attr.2.failure_rate":      float64(0),
		"attr.2.raw_string":        "108",
		"attr.2.raw_value":         int64(108),
		"attr.2.status":            int64(0),
		"attr.2.status_reason":     "",
		"attr.2.thresh":            int64(54),
		"attr.2.transformed_value": int64(0),
		"attr.2.value":             int64(135),
		"attr.2.when_failed":       "",
		"attr.2.worst":             int64(135),

		"power_cycle_count": int64(10),
		"power_on_hours":    int64(10),
		"temp":              int64(50),
	}, fields)
}
|
||||||
|
|
||||||
|
// TestSmart_Flatten_SCSI verifies flattening for SCSI attributes, which are
// keyed by name rather than numeric id and emit no raw_string/raw_value/
// when_failed/worst fields (those are ATA-only).
func TestSmart_Flatten_SCSI(t *testing.T) {
	//setup
	timeNow := time.Now()
	smart := measurements.Smart{
		Date:            timeNow,
		DeviceWWN:       "test-wwn",
		DeviceProtocol:  pkg.DeviceProtocolScsi,
		Temp:            50,
		PowerOnHours:    10,
		PowerCycleCount: 10,
		Status:          0,
		Attributes: map[string]measurements.SmartAttribute{
			"read_errors_corrected_by_eccfast": &measurements.SmartScsiAttribute{
				AttributeId: "read_errors_corrected_by_eccfast",
				Value:       int64(300357663),
			},
		},
	}

	//test
	tags, fields := smart.Flatten()

	//assert
	require.Equal(t, map[string]string{"device_protocol": "SCSI", "device_wwn": "test-wwn"}, tags)
	require.Equal(t, map[string]interface{}{
		"attr.read_errors_corrected_by_eccfast.attribute_id":      "read_errors_corrected_by_eccfast",
		"attr.read_errors_corrected_by_eccfast.failure_rate":      float64(0),
		"attr.read_errors_corrected_by_eccfast.status":            int64(0),
		"attr.read_errors_corrected_by_eccfast.status_reason":     "",
		"attr.read_errors_corrected_by_eccfast.thresh":            int64(0),
		"attr.read_errors_corrected_by_eccfast.transformed_value": int64(0),
		"attr.read_errors_corrected_by_eccfast.value":             int64(300357663),
		"power_cycle_count": int64(10),
		"power_on_hours":    int64(10),
		"temp":              int64(50)},
		fields)
}
|
||||||
|
|
||||||
|
// TestSmart_Flatten_NVMe verifies flattening for NVMe attributes, which (like
// SCSI) are keyed by name and omit the ATA-only raw/worst/when_failed fields.
func TestSmart_Flatten_NVMe(t *testing.T) {
	//setup
	timeNow := time.Now()
	smart := measurements.Smart{
		Date:            timeNow,
		DeviceWWN:       "test-wwn",
		DeviceProtocol:  pkg.DeviceProtocolNvme,
		Temp:            50,
		PowerOnHours:    10,
		PowerCycleCount: 10,
		Status:          0,
		Attributes: map[string]measurements.SmartAttribute{
			"available_spare": &measurements.SmartNvmeAttribute{
				AttributeId: "available_spare",
				Value:       int64(100),
			},
		},
	}

	//test
	tags, fields := smart.Flatten()

	//assert
	require.Equal(t, map[string]string{"device_protocol": "NVMe", "device_wwn": "test-wwn"}, tags)
	require.Equal(t, map[string]interface{}{
		"attr.available_spare.attribute_id":      "available_spare",
		"attr.available_spare.failure_rate":      float64(0),
		"attr.available_spare.status":            int64(0),
		"attr.available_spare.status_reason":     "",
		"attr.available_spare.thresh":            int64(0),
		"attr.available_spare.transformed_value": int64(0),
		"attr.available_spare.value":             int64(100),
		"power_cycle_count":                      int64(10),
		"power_on_hours":                         int64(10),
		"temp":                                   int64(50)}, fields)
}
|
||||||
|
|
||||||
|
// TestNewSmartFromInfluxDB_ATA verifies the round-trip inverse of Flatten:
// a map of InfluxDB row values (flattened "attr.<id>.*" keys plus top-level
// fields) is inflated back into a Smart model with a populated ATA attribute.
func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
	//setup
	timeNow := time.Now()
	attrs := map[string]interface{}{
		"_time":                    timeNow,
		"device_wwn":               "test-wwn",
		"device_protocol":          pkg.DeviceProtocolAta,
		"attr.1.attribute_id":      "1",
		"attr.1.failure_rate":      float64(0),
		"attr.1.raw_string":        "108",
		"attr.1.raw_value":         int64(108),
		"attr.1.status":            int64(0),
		"attr.1.status_reason":     "",
		"attr.1.thresh":            int64(54),
		"attr.1.transformed_value": int64(0),
		"attr.1.value":             int64(135),
		"attr.1.when_failed":       "",
		"attr.1.worst":             int64(135),
		"power_cycle_count":        int64(10),
		"power_on_hours":           int64(10),
		"temp":                     int64(50),
	}

	//test
	smart, err := measurements.NewSmartFromInfluxDB(attrs)

	//assert
	require.NoError(t, err)
	require.Equal(t, &measurements.Smart{
		Date:            timeNow,
		DeviceWWN:       "test-wwn",
		DeviceProtocol:  "ATA",
		Temp:            50,
		PowerOnHours:    10,
		PowerCycleCount: 10,
		Attributes: map[string]measurements.SmartAttribute{
			"1": &measurements.SmartAtaAttribute{
				AttributeId: 1,
				Value:       135,
				Threshold:   54,
				Worst:       135,
				RawValue:    108,
				RawString:   "108",
				WhenFailed:  "",
			},
		}, Status: 0}, smart)
}
|
||||||
|
|
||||||
|
// TestNewSmartFromInfluxDB_NVMe verifies inflating an InfluxDB row with
// name-keyed NVMe attribute fields back into a Smart model.
func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
	//setup
	timeNow := time.Now()
	attrs := map[string]interface{}{
		"_time":           timeNow,
		"device_wwn":      "test-wwn",
		"device_protocol": pkg.DeviceProtocolNvme,
		"attr.available_spare.attribute_id":      "available_spare",
		"attr.available_spare.failure_rate":      float64(0),
		"attr.available_spare.status":            int64(0),
		"attr.available_spare.status_reason":     "",
		"attr.available_spare.thresh":            int64(0),
		"attr.available_spare.transformed_value": int64(0),
		"attr.available_spare.value":             int64(100),
		"power_cycle_count":                      int64(10),
		"power_on_hours":                         int64(10),
		"temp":                                   int64(50),
	}

	//test
	smart, err := measurements.NewSmartFromInfluxDB(attrs)

	//assert
	require.NoError(t, err)
	require.Equal(t, &measurements.Smart{
		Date:            timeNow,
		DeviceWWN:       "test-wwn",
		DeviceProtocol:  "NVMe",
		Temp:            50,
		PowerOnHours:    10,
		PowerCycleCount: 10,
		Attributes: map[string]measurements.SmartAttribute{
			"available_spare": &measurements.SmartNvmeAttribute{
				AttributeId: "available_spare",
				Value:       int64(100),
			},
		}, Status: 0}, smart)
}
|
||||||
|
|
||||||
|
// TestNewSmartFromInfluxDB_SCSI verifies inflating an InfluxDB row with
// name-keyed SCSI attribute fields back into a Smart model.
func TestNewSmartFromInfluxDB_SCSI(t *testing.T) {
	//setup
	timeNow := time.Now()
	attrs := map[string]interface{}{
		"_time":           timeNow,
		"device_wwn":      "test-wwn",
		"device_protocol": pkg.DeviceProtocolScsi,
		"attr.read_errors_corrected_by_eccfast.attribute_id":      "read_errors_corrected_by_eccfast",
		"attr.read_errors_corrected_by_eccfast.failure_rate":      float64(0),
		"attr.read_errors_corrected_by_eccfast.status":            int64(0),
		"attr.read_errors_corrected_by_eccfast.status_reason":     "",
		"attr.read_errors_corrected_by_eccfast.thresh":            int64(0),
		"attr.read_errors_corrected_by_eccfast.transformed_value": int64(0),
		"attr.read_errors_corrected_by_eccfast.value":             int64(300357663),
		"power_cycle_count": int64(10),
		"power_on_hours":    int64(10),
		"temp":              int64(50),
	}

	//test
	smart, err := measurements.NewSmartFromInfluxDB(attrs)

	//assert
	require.NoError(t, err)
	require.Equal(t, &measurements.Smart{
		Date:            timeNow,
		DeviceWWN:       "test-wwn",
		DeviceProtocol:  "SCSI",
		Temp:            50,
		PowerOnHours:    10,
		PowerCycleCount: 10,
		Attributes: map[string]measurements.SmartAttribute{
			"read_errors_corrected_by_eccfast": &measurements.SmartScsiAttribute{
				AttributeId: "read_errors_corrected_by_eccfast",
				Value:       int64(300357663),
			},
		}, Status: 0}, smart)
}
|
||||||
|
|
||||||
|
// TestFromCollectorSmartInfo verifies that a full ATA smartctl JSON payload
// (fixture relative to the package dir) is parsed into a Smart model with a
// passing status, all 18 attributes, and the multi-byte temperature raw value
// (attribute 194) decoded into a usable celsius TransformedValue.
func TestFromCollectorSmartInfo(t *testing.T) {
	//setup
	smartDataFile, err := os.Open("../testdata/smart-ata.json")
	require.NoError(t, err)
	defer smartDataFile.Close()

	var smartJson collector.SmartInfo

	smartDataBytes, err := ioutil.ReadAll(smartDataFile)
	require.NoError(t, err)
	err = json.Unmarshal(smartDataBytes, &smartJson)
	require.NoError(t, err)

	//test
	smartMdl := measurements.Smart{}
	err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)

	//assert
	require.NoError(t, err)
	require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
	require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
	require.Equal(t, 18, len(smartMdl.Attributes))

	//check that temperature was correctly parsed
	// raw value packs min/max/current; transformed value is the current temp (32C)
	require.Equal(t, int64(163210330144), smartMdl.Attributes["194"].(*measurements.SmartAtaAttribute).RawValue)
	require.Equal(t, int64(32), smartMdl.Attributes["194"].(*measurements.SmartAtaAttribute).TransformedValue)
}
|
||||||
|
|
||||||
|
// TestFromCollectorSmartInfo_Fail_Smart verifies that a payload whose SMART
// self-assessment failed yields DeviceStatusFailedSmart and (for this fixture)
// no parsed attributes.
func TestFromCollectorSmartInfo_Fail_Smart(t *testing.T) {
	//setup
	smartDataFile, err := os.Open("../testdata/smart-fail.json")
	require.NoError(t, err)
	defer smartDataFile.Close()

	var smartJson collector.SmartInfo

	smartDataBytes, err := ioutil.ReadAll(smartDataFile)
	require.NoError(t, err)
	err = json.Unmarshal(smartDataBytes, &smartJson)
	require.NoError(t, err)

	//test
	smartMdl := measurements.Smart{}
	err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)

	//assert
	require.NoError(t, err)
	require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
	require.Equal(t, pkg.DeviceStatusFailedSmart, smartMdl.Status)
	require.Equal(t, 0, len(smartMdl.Attributes))
}
|
||||||
|
|
||||||
|
// TestFromCollectorSmartInfo_Fail_ScrutinySmart verifies that a device can
// fail both checks at once: the status is a bitmask combining the Scrutiny
// threshold failure with the SMART self-assessment failure.
func TestFromCollectorSmartInfo_Fail_ScrutinySmart(t *testing.T) {
	//setup
	smartDataFile, err := os.Open("../testdata/smart-fail2.json")
	require.NoError(t, err)
	defer smartDataFile.Close()

	var smartJson collector.SmartInfo

	smartDataBytes, err := ioutil.ReadAll(smartDataFile)
	require.NoError(t, err)
	err = json.Unmarshal(smartDataBytes, &smartJson)
	require.NoError(t, err)

	//test
	smartMdl := measurements.Smart{}
	err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)

	//assert
	require.NoError(t, err)
	require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
	require.Equal(t, pkg.DeviceStatusFailedScrutiny|pkg.DeviceStatusFailedSmart, smartMdl.Status)
	require.Equal(t, 17, len(smartMdl.Attributes))
}
|
||||||
|
|
||||||
|
// TestFromCollectorSmartInfo_Nvme verifies parsing of an NVMe smartctl
// payload: passing status, 16 name-keyed attributes, and spot-checks of the
// host_reads/host_writes counter values.
func TestFromCollectorSmartInfo_Nvme(t *testing.T) {
	//setup
	smartDataFile, err := os.Open("../testdata/smart-nvme.json")
	require.NoError(t, err)
	defer smartDataFile.Close()

	var smartJson collector.SmartInfo

	smartDataBytes, err := ioutil.ReadAll(smartDataFile)
	require.NoError(t, err)
	err = json.Unmarshal(smartDataBytes, &smartJson)
	require.NoError(t, err)

	//test
	smartMdl := measurements.Smart{}
	err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)

	//assert
	require.NoError(t, err)
	require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
	require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
	require.Equal(t, 16, len(smartMdl.Attributes))

	require.Equal(t, int64(111303174), smartMdl.Attributes["host_reads"].(*measurements.SmartNvmeAttribute).Value)
	require.Equal(t, int64(83170961), smartMdl.Attributes["host_writes"].(*measurements.SmartNvmeAttribute).Value)
}
|
||||||
|
|
||||||
|
// TestFromCollectorSmartInfo_Scsi verifies parsing of a SCSI smartctl
// payload: passing status, 13 name-keyed attributes, and spot-checks of the
// grown-defect-list and ECC-corrected-read-error counters.
func TestFromCollectorSmartInfo_Scsi(t *testing.T) {
	//setup
	smartDataFile, err := os.Open("../testdata/smart-scsi.json")
	require.NoError(t, err)
	defer smartDataFile.Close()

	var smartJson collector.SmartInfo

	smartDataBytes, err := ioutil.ReadAll(smartDataFile)
	require.NoError(t, err)
	err = json.Unmarshal(smartDataBytes, &smartJson)
	require.NoError(t, err)

	//test
	smartMdl := measurements.Smart{}
	err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)

	//assert
	require.NoError(t, err)
	require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
	require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
	require.Equal(t, 13, len(smartMdl.Attributes))

	require.Equal(t, int64(56), smartMdl.Attributes["scsi_grown_defect_list"].(*measurements.SmartScsiAttribute).Value)
	require.Equal(t, int64(300357663), smartMdl.Attributes["read_errors_corrected_by_eccfast"].(*measurements.SmartScsiAttribute).Value) //total_errors_corrected
}
|
@ -0,0 +1,5 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
// Temperature Format
|
||||||
|
// Date Format
|
||||||
|
// Device History window
|
@ -0,0 +1,97 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// main seeds a locally-running scrutiny server (port 9090) with test data:
// it registers a fixed set of devices, then replays each device's smartctl
// fixture once per day over the last 31 days so the dashboard has history.
// Paths are relative to the repository root; run from there.
func main() {

	//webapp/backend/pkg/web/testdata/register-devices-req.json
	devices := "webapp/backend/pkg/web/testdata/register-devices-req.json"

	// map of device WWN -> smartctl JSON fixtures to replay for that device
	smartData := map[string][]string{
		"0x5000cca264eb01d7": {"webapp/backend/pkg/models/testdata/smart-ata.json", "webapp/backend/pkg/models/testdata/smart-ata-date.json", "webapp/backend/pkg/models/testdata/smart-ata-date2.json"},
		"0x5000cca264ec3183": {"webapp/backend/pkg/models/testdata/smart-fail2.json"},
		"0x5002538e40a22954": {"webapp/backend/pkg/models/testdata/smart-nvme.json"},
		"0x5000cca252c859cc": {"webapp/backend/pkg/models/testdata/smart-scsi.json"},
		"0x5000cca264ebc248": {"webapp/backend/pkg/models/testdata/smart-scsi2.json"},
	}

	// send a post request to register devices
	file, err := os.Open(devices)
	if err != nil {
		log.Fatalf("ERROR %v", err)
	}
	defer file.Close()
	_, err = SendPostRequest("http://localhost:9090/api/devices/register", file)
	if err != nil {
		log.Fatalf("ERROR %v", err)
	}
	//

	for diskId, smartDataFileNames := range smartData {
		for _, smartDataFileName := range smartDataFileNames {
			for daysToSubtract := 0; daysToSubtract <= 30; daysToSubtract++ { //add ~31 days (a month) worth of data
				// re-read the fixture each iteration, rewriting its timestamp
				// so InfluxDB does not reject it as outside the retention period
				smartDataReader, err := readSmartDataFileFixTimestamp(daysToSubtract, smartDataFileName)
				if err != nil {
					log.Fatalf("ERROR %v", err)
				}

				_, err = SendPostRequest(fmt.Sprintf("http://localhost:9090/api/device/%s/smart", diskId), smartDataReader)
				if err != nil {
					log.Fatalf("ERROR %v", err)
				}
			}

		}

	}

}
|
||||||
|
|
||||||
|
// SendPostRequest POSTs the contents of file as JSON to url and returns the
// raw response body. The HTTP status line is logged for visibility; note that
// non-2xx statuses are NOT treated as errors by this helper.
func SendPostRequest(url string, file io.Reader) ([]byte, error) {
	resp, err := http.Post(url, "application/json", file)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	log.Printf("%v\n", resp.Status)

	return ioutil.ReadAll(resp.Body)
}
|
||||||
|
|
||||||
|
// InfluxDB will throw an error/ignore any submitted data with a timestamp older than the
|
||||||
|
// retention period. Lets fix this by opening test files, modifying the timestamp and returning an io.Reader
|
||||||
|
func readSmartDataFileFixTimestamp(daysToSubtract int, smartDataFilepath string) (io.Reader, error) {
|
||||||
|
metricsfile, err := os.Open(smartDataFilepath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
metricsFileData, err := ioutil.ReadAll(metricsfile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//unmarshal because we need to change the timestamp
|
||||||
|
var smartData collector.SmartInfo
|
||||||
|
err = json.Unmarshal(metricsFileData, &smartData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
daysToSubtractInHours := time.Duration(-1 * 24 * daysToSubtract)
|
||||||
|
smartData.LocalTime.TimeT = time.Now().Add(daysToSubtractInHours * time.Hour).Unix()
|
||||||
|
updatedSmartDataBytes, err := json.Marshal(smartData)
|
||||||
|
|
||||||
|
return bytes.NewReader(updatedSmartDataBytes), nil
|
||||||
|
}
|
@ -0,0 +1,846 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"svn_revision": "4883",
|
||||||
|
"platform_info": "x86_64-linux-4.19.128-flatcar",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"-j",
|
||||||
|
"-a",
|
||||||
|
"/dev/sdb"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"device": {
|
||||||
|
"name": "/dev/sdb",
|
||||||
|
"info_name": "/dev/sdb [SAT]",
|
||||||
|
"type": "sat",
|
||||||
|
"protocol": "ATA"
|
||||||
|
},
|
||||||
|
"model_name": "WDC WD140EDFZ-11A0VA0",
|
||||||
|
"serial_number": "9RK1XXXX",
|
||||||
|
"wwn": {
|
||||||
|
"naa": 5,
|
||||||
|
"oui": 3274,
|
||||||
|
"id": 10283057623
|
||||||
|
},
|
||||||
|
"firmware_version": "81.00A81",
|
||||||
|
"user_capacity": {
|
||||||
|
"blocks": 27344764928,
|
||||||
|
"bytes": 14000519643136
|
||||||
|
},
|
||||||
|
"logical_block_size": 512,
|
||||||
|
"physical_block_size": 4096,
|
||||||
|
"rotation_rate": 5400,
|
||||||
|
"form_factor": {
|
||||||
|
"ata_value": 2,
|
||||||
|
"name": "3.5 inches"
|
||||||
|
},
|
||||||
|
"in_smartctl_database": false,
|
||||||
|
"ata_version": {
|
||||||
|
"string": "ACS-2, ATA8-ACS T13/1699-D revision 4",
|
||||||
|
"major_value": 1020,
|
||||||
|
"minor_value": 41
|
||||||
|
},
|
||||||
|
"sata_version": {
|
||||||
|
"string": "SATA 3.2",
|
||||||
|
"value": 255
|
||||||
|
},
|
||||||
|
"interface_speed": {
|
||||||
|
"max": {
|
||||||
|
"sata_value": 14,
|
||||||
|
"string": "6.0 Gb/s",
|
||||||
|
"units_per_second": 60,
|
||||||
|
"bits_per_unit": 100000000
|
||||||
|
},
|
||||||
|
"current": {
|
||||||
|
"sata_value": 3,
|
||||||
|
"string": "6.0 Gb/s",
|
||||||
|
"units_per_second": 60,
|
||||||
|
"bits_per_unit": 100000000
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"local_time": {
|
||||||
|
"time_t": 1637039918,
|
||||||
|
"asctime": "Sun Jun 30 00:03:30 2021 UTC"
|
||||||
|
},
|
||||||
|
"smart_status": {
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"ata_smart_data": {
|
||||||
|
"offline_data_collection": {
|
||||||
|
"status": {
|
||||||
|
"value": 130,
|
||||||
|
"string": "was completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"completion_seconds": 101
|
||||||
|
},
|
||||||
|
"self_test": {
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "in progress, 10% remaining",
|
||||||
|
"remaining_percent": 10
|
||||||
|
},
|
||||||
|
"polling_minutes": {
|
||||||
|
"short": 2,
|
||||||
|
"extended": 1479
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"capabilities": {
|
||||||
|
"values": [
|
||||||
|
91,
|
||||||
|
3
|
||||||
|
],
|
||||||
|
"exec_offline_immediate_supported": true,
|
||||||
|
"offline_is_aborted_upon_new_cmd": false,
|
||||||
|
"offline_surface_scan_supported": true,
|
||||||
|
"self_tests_supported": true,
|
||||||
|
"conveyance_self_test_supported": false,
|
||||||
|
"selective_self_test_supported": true,
|
||||||
|
"attribute_autosave_enabled": true,
|
||||||
|
"error_logging_supported": true,
|
||||||
|
"gp_logging_supported": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ata_sct_capabilities": {
|
||||||
|
"value": 61,
|
||||||
|
"error_recovery_control_supported": true,
|
||||||
|
"feature_control_supported": true,
|
||||||
|
"data_table_supported": true
|
||||||
|
},
|
||||||
|
"ata_smart_attributes": {
|
||||||
|
"revision": 16,
|
||||||
|
"table": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"name": "Raw_Read_Error_Rate",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 11,
|
||||||
|
"string": "PO-R-- ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"name": "Throughput_Performance",
|
||||||
|
"value": 135,
|
||||||
|
"worst": 135,
|
||||||
|
"thresh": 54,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 4,
|
||||||
|
"string": "--S--- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": false,
|
||||||
|
"performance": true,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 108,
|
||||||
|
"string": "108"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"name": "Spin_Up_Time",
|
||||||
|
"value": 81,
|
||||||
|
"worst": 81,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 7,
|
||||||
|
"string": "POS--- ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": true,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 30089675132,
|
||||||
|
"string": "380 (Average 380)"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"name": "Start_Stop_Count",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 9,
|
||||||
|
"string": "9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"name": "Reallocated_Sector_Ct",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 51,
|
||||||
|
"string": "PO--CK ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 7,
|
||||||
|
"name": "Seek_Error_Rate",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "-O-R-- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 8,
|
||||||
|
"name": "Seek_Time_Performance",
|
||||||
|
"value": 133,
|
||||||
|
"worst": 133,
|
||||||
|
"thresh": 20,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 4,
|
||||||
|
"string": "--S--- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": false,
|
||||||
|
"performance": true,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "18"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 9,
|
||||||
|
"name": "Power_On_Hours",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 1730,
|
||||||
|
"string": "1730"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 10,
|
||||||
|
"name": "Spin_Retry_Count",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 12,
|
||||||
|
"name": "Power_Cycle_Count",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 50,
|
||||||
|
"string": "-O--CK ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 9,
|
||||||
|
"string": "9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 22,
|
||||||
|
"name": "Unknown_Attribute",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 25,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 35,
|
||||||
|
"string": "PO---K ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 100,
|
||||||
|
"string": "100"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 192,
|
||||||
|
"name": "Power-Off_Retract_Count",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 50,
|
||||||
|
"string": "-O--CK ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 329,
|
||||||
|
"string": "329"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 193,
|
||||||
|
"name": "Load_Cycle_Count",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 329,
|
||||||
|
"string": "329"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 194,
|
||||||
|
"name": "Temperature_Celsius",
|
||||||
|
"value": 51,
|
||||||
|
"worst": 51,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "-O---- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 163210330144,
|
||||||
|
"string": "32 (Min/Max 24/38)"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 196,
|
||||||
|
"name": "Reallocated_Event_Count",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 50,
|
||||||
|
"string": "-O--CK ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 197,
|
||||||
|
"name": "Current_Pending_Sector",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 34,
|
||||||
|
"string": "-O---K ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 198,
|
||||||
|
"name": "Offline_Uncorrectable",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 8,
|
||||||
|
"string": "---R-- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": false,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 199,
|
||||||
|
"name": "UDMA_CRC_Error_Count",
|
||||||
|
"value": 100,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "-O-R-- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"power_on_time": {
|
||||||
|
"hours": 1730
|
||||||
|
},
|
||||||
|
"power_cycle_count": 9,
|
||||||
|
"temperature": {
|
||||||
|
"current": 32
|
||||||
|
},
|
||||||
|
"ata_smart_error_log": {
|
||||||
|
"summary": {
|
||||||
|
"revision": 1,
|
||||||
|
"count": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ata_smart_self_test_log": {
|
||||||
|
"standard": {
|
||||||
|
"revision": 1,
|
||||||
|
"table": [
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1708
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1684
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1661
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1636
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "Extended offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1624
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1541
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1517
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1493
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1469
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1445
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "Extended offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1439
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1373
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1349
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1325
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1301
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1277
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1253
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "Extended offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1252
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1205
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1181
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1157
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"count": 21,
|
||||||
|
"error_count_total": 0,
|
||||||
|
"error_count_outdated": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ata_smart_selective_self_test_log": {
|
||||||
|
"revision": 1,
|
||||||
|
"table": [
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"flags": {
|
||||||
|
"value": 0,
|
||||||
|
"remainder_scan_enabled": false
|
||||||
|
},
|
||||||
|
"power_up_scan_resume_minutes": 0
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,846 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"svn_revision": "4883",
|
||||||
|
"platform_info": "x86_64-linux-4.19.128-flatcar",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"-j",
|
||||||
|
"-a",
|
||||||
|
"/dev/sdb"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"device": {
|
||||||
|
"name": "/dev/sdb",
|
||||||
|
"info_name": "/dev/sdb [SAT]",
|
||||||
|
"type": "sat",
|
||||||
|
"protocol": "ATA"
|
||||||
|
},
|
||||||
|
"model_name": "WDC WD140EDFZ-11A0VA0",
|
||||||
|
"serial_number": "9RK1XXXX",
|
||||||
|
"wwn": {
|
||||||
|
"naa": 5,
|
||||||
|
"oui": 3274,
|
||||||
|
"id": 10283057623
|
||||||
|
},
|
||||||
|
"firmware_version": "81.00A81",
|
||||||
|
"user_capacity": {
|
||||||
|
"blocks": 27344764928,
|
||||||
|
"bytes": 14000519643136
|
||||||
|
},
|
||||||
|
"logical_block_size": 512,
|
||||||
|
"physical_block_size": 4096,
|
||||||
|
"rotation_rate": 5400,
|
||||||
|
"form_factor": {
|
||||||
|
"ata_value": 2,
|
||||||
|
"name": "3.5 inches"
|
||||||
|
},
|
||||||
|
"in_smartctl_database": false,
|
||||||
|
"ata_version": {
|
||||||
|
"string": "ACS-2, ATA8-ACS T13/1699-D revision 4",
|
||||||
|
"major_value": 1020,
|
||||||
|
"minor_value": 41
|
||||||
|
},
|
||||||
|
"sata_version": {
|
||||||
|
"string": "SATA 3.2",
|
||||||
|
"value": 255
|
||||||
|
},
|
||||||
|
"interface_speed": {
|
||||||
|
"max": {
|
||||||
|
"sata_value": 14,
|
||||||
|
"string": "6.0 Gb/s",
|
||||||
|
"units_per_second": 60,
|
||||||
|
"bits_per_unit": 100000000
|
||||||
|
},
|
||||||
|
"current": {
|
||||||
|
"sata_value": 3,
|
||||||
|
"string": "6.0 Gb/s",
|
||||||
|
"units_per_second": 60,
|
||||||
|
"bits_per_unit": 100000000
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"local_time": {
|
||||||
|
"time_t": 1637039918,
|
||||||
|
"asctime": "Tue Feb 23 00:03:30 2021 UTC"
|
||||||
|
},
|
||||||
|
"smart_status": {
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"ata_smart_data": {
|
||||||
|
"offline_data_collection": {
|
||||||
|
"status": {
|
||||||
|
"value": 130,
|
||||||
|
"string": "was completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"completion_seconds": 101
|
||||||
|
},
|
||||||
|
"self_test": {
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "in progress, 10% remaining",
|
||||||
|
"remaining_percent": 10
|
||||||
|
},
|
||||||
|
"polling_minutes": {
|
||||||
|
"short": 2,
|
||||||
|
"extended": 1479
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"capabilities": {
|
||||||
|
"values": [
|
||||||
|
91,
|
||||||
|
3
|
||||||
|
],
|
||||||
|
"exec_offline_immediate_supported": true,
|
||||||
|
"offline_is_aborted_upon_new_cmd": false,
|
||||||
|
"offline_surface_scan_supported": true,
|
||||||
|
"self_tests_supported": true,
|
||||||
|
"conveyance_self_test_supported": false,
|
||||||
|
"selective_self_test_supported": true,
|
||||||
|
"attribute_autosave_enabled": true,
|
||||||
|
"error_logging_supported": true,
|
||||||
|
"gp_logging_supported": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ata_sct_capabilities": {
|
||||||
|
"value": 61,
|
||||||
|
"error_recovery_control_supported": true,
|
||||||
|
"feature_control_supported": true,
|
||||||
|
"data_table_supported": true
|
||||||
|
},
|
||||||
|
"ata_smart_attributes": {
|
||||||
|
"revision": 16,
|
||||||
|
"table": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"name": "Raw_Read_Error_Rate",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 11,
|
||||||
|
"string": "PO-R-- ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"name": "Throughput_Performance",
|
||||||
|
"value": 125,
|
||||||
|
"worst": 135,
|
||||||
|
"thresh": 54,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 4,
|
||||||
|
"string": "--S--- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": false,
|
||||||
|
"performance": true,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 118,
|
||||||
|
"string": "108"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"name": "Spin_Up_Time",
|
||||||
|
"value": 71,
|
||||||
|
"worst": 81,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 7,
|
||||||
|
"string": "POS--- ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": true,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 30089675142,
|
||||||
|
"string": "380 (Average 380)"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"name": "Start_Stop_Count",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 19,
|
||||||
|
"string": "9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"name": "Reallocated_Sector_Ct",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 51,
|
||||||
|
"string": "PO--CK ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 7,
|
||||||
|
"name": "Seek_Error_Rate",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "-O-R-- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 8,
|
||||||
|
"name": "Seek_Time_Performance",
|
||||||
|
"value": 123,
|
||||||
|
"worst": 133,
|
||||||
|
"thresh": 20,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 4,
|
||||||
|
"string": "--S--- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": false,
|
||||||
|
"performance": true,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 28,
|
||||||
|
"string": "18"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 9,
|
||||||
|
"name": "Power_On_Hours",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 1740,
|
||||||
|
"string": "1730"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 10,
|
||||||
|
"name": "Spin_Retry_Count",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 1,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 12,
|
||||||
|
"name": "Power_Cycle_Count",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 50,
|
||||||
|
"string": "-O--CK ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 19,
|
||||||
|
"string": "9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 22,
|
||||||
|
"name": "Unknown_Attribute",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 25,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 35,
|
||||||
|
"string": "PO---K ",
|
||||||
|
"prefailure": true,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 110,
|
||||||
|
"string": "100"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 192,
|
||||||
|
"name": "Power-Off_Retract_Count",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 50,
|
||||||
|
"string": "-O--CK ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 339,
|
||||||
|
"string": "329"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 193,
|
||||||
|
"name": "Load_Cycle_Count",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 18,
|
||||||
|
"string": "-O--C- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 339,
|
||||||
|
"string": "329"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 194,
|
||||||
|
"name": "Temperature_Celsius",
|
||||||
|
"value": 41,
|
||||||
|
"worst": 51,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "-O---- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 163210330154,
|
||||||
|
"string": "32 (Min/Max 24/38)"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 196,
|
||||||
|
"name": "Reallocated_Event_Count",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 50,
|
||||||
|
"string": "-O--CK ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": true,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 197,
|
||||||
|
"name": "Current_Pending_Sector",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 34,
|
||||||
|
"string": "-O---K ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": false,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": true
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 198,
|
||||||
|
"name": "Offline_Uncorrectable",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 8,
|
||||||
|
"string": "---R-- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": false,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 199,
|
||||||
|
"name": "UDMA_CRC_Error_Count",
|
||||||
|
"value": 90,
|
||||||
|
"worst": 100,
|
||||||
|
"thresh": 0,
|
||||||
|
"when_failed": "",
|
||||||
|
"flags": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "-O-R-- ",
|
||||||
|
"prefailure": false,
|
||||||
|
"updated_online": true,
|
||||||
|
"performance": false,
|
||||||
|
"error_rate": true,
|
||||||
|
"event_count": false,
|
||||||
|
"auto_keep": false
|
||||||
|
},
|
||||||
|
"raw": {
|
||||||
|
"value": 10,
|
||||||
|
"string": "0"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"power_on_time": {
|
||||||
|
"hours": 3030
|
||||||
|
},
|
||||||
|
"power_cycle_count": 9,
|
||||||
|
"temperature": {
|
||||||
|
"current": 62
|
||||||
|
},
|
||||||
|
"ata_smart_error_log": {
|
||||||
|
"summary": {
|
||||||
|
"revision": 1,
|
||||||
|
"count": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ata_smart_self_test_log": {
|
||||||
|
"standard": {
|
||||||
|
"revision": 1,
|
||||||
|
"table": [
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1708
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1684
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1661
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1636
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "Extended offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1624
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1541
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1517
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1493
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1469
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1445
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "Extended offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1439
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1373
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1349
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1325
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1301
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1277
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1253
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 2,
|
||||||
|
"string": "Extended offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1252
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1205
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1181
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": {
|
||||||
|
"value": 1,
|
||||||
|
"string": "Short offline"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "Completed without error",
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"lifetime_hours": 1157
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"count": 21,
|
||||||
|
"error_count_total": 0,
|
||||||
|
"error_count_outdated": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ata_smart_selective_self_test_log": {
|
||||||
|
"revision": 1,
|
||||||
|
"table": [
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lba_min": 0,
|
||||||
|
"lba_max": 0,
|
||||||
|
"status": {
|
||||||
|
"value": 241,
|
||||||
|
"string": "Not_testing"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"flags": {
|
||||||
|
"value": 0,
|
||||||
|
"remainder_scan_enabled": false
|
||||||
|
},
|
||||||
|
"power_up_scan_resume_minutes": 0
|
||||||
|
}
|
||||||
|
}
|
@ -1,53 +1,44 @@
|
|||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/metadata"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
dbModels "github.com/analogj/scrutiny/webapp/backend/pkg/models/db"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"gorm.io/gorm"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
)
|
)
|
||||||
|
|
||||||
func GetDeviceDetails(c *gin.Context) {
|
func GetDeviceDetails(c *gin.Context) {
|
||||||
db := c.MustGet("DB").(*gorm.DB)
|
|
||||||
logger := c.MustGet("LOGGER").(logrus.FieldLogger)
|
logger := c.MustGet("LOGGER").(logrus.FieldLogger)
|
||||||
device := dbModels.Device{}
|
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
||||||
|
|
||||||
if err := db.Preload("SmartResults", func(db *gorm.DB) *gorm.DB {
|
|
||||||
return db.Order("smarts.created_at DESC").Limit(40)
|
|
||||||
}).
|
|
||||||
Preload("SmartResults.AtaAttributes").
|
|
||||||
Preload("SmartResults.NvmeAttributes").
|
|
||||||
Preload("SmartResults.ScsiAttributes").
|
|
||||||
Where("wwn = ?", c.Param("wwn")).
|
|
||||||
First(&device).Error; err != nil {
|
|
||||||
|
|
||||||
|
device, err := deviceRepo.GetDeviceDetails(c, c.Param("wwn"))
|
||||||
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while retrieving device details", err)
|
logger.Errorln("An error occurred while retrieving device details", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := device.SquashHistory(); err != nil {
|
durationKey, exists := c.GetQuery("duration_key")
|
||||||
logger.Errorln("An error occurred while squashing device history", err)
|
if !exists {
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
durationKey = "forever"
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := device.ApplyMetadataRules(); err != nil {
|
smartResults, err := deviceRepo.GetSmartAttributeHistory(c, c.Param("wwn"), durationKey, nil)
|
||||||
logger.Errorln("An error occurred while applying scrutiny thresholds & rules", err)
|
if err != nil {
|
||||||
|
logger.Errorln("An error occurred while retrieving device smart results", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var deviceMetadata interface{}
|
var deviceMetadata interface{}
|
||||||
if device.IsAta() {
|
if device.IsAta() {
|
||||||
deviceMetadata = metadata.AtaMetadata
|
deviceMetadata = thresholds.AtaMetadata
|
||||||
} else if device.IsNvme() {
|
} else if device.IsNvme() {
|
||||||
deviceMetadata = metadata.NmveMetadata
|
deviceMetadata = thresholds.NmveMetadata
|
||||||
} else if device.IsScsi() {
|
} else if device.IsScsi() {
|
||||||
deviceMetadata = metadata.ScsiMetadata
|
deviceMetadata = thresholds.ScsiMetadata
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"success": true, "data": device, "metadata": deviceMetadata})
|
c.JSON(http.StatusOK, gin.H{"success": true, "data": map[string]interface{}{"device": device, "smart_results": smartResults}, "metadata": deviceMetadata})
|
||||||
}
|
}
|
||||||
|
@ -1,31 +1,28 @@
|
|||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
dbModels "github.com/analogj/scrutiny/webapp/backend/pkg/models/db"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"gorm.io/gorm"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
)
|
)
|
||||||
|
|
||||||
func GetDevicesSummary(c *gin.Context) {
|
func GetDevicesSummary(c *gin.Context) {
|
||||||
db := c.MustGet("DB").(*gorm.DB)
|
|
||||||
logger := c.MustGet("LOGGER").(logrus.FieldLogger)
|
logger := c.MustGet("LOGGER").(logrus.FieldLogger)
|
||||||
devices := []dbModels.Device{}
|
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
||||||
|
|
||||||
//We need the last x (for now all) Smart objects for each Device, so that we can graph Temperature
|
summary, err := deviceRepo.GetSummary(c)
|
||||||
//We also need the last
|
if err != nil {
|
||||||
if err := db.Preload("SmartResults", func(db *gorm.DB) *gorm.DB {
|
logger.Errorln("An error occurred while retrieving device summary", err)
|
||||||
return db.Order("smarts.created_at DESC") //OLD: .Limit(devicesCount)
|
|
||||||
}).
|
|
||||||
Find(&devices).Error; err != nil {
|
|
||||||
logger.Errorln("Could not get device summary from DB", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
c.JSON(http.StatusOK, gin.H{
|
||||||
"success": true,
|
"success": true,
|
||||||
"data": devices,
|
"data": map[string]interface{}{
|
||||||
|
"summary": summary,
|
||||||
|
//"temperature": tem
|
||||||
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -0,0 +1,32 @@
|
|||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GetDevicesSummaryTempHistory(c *gin.Context) {
|
||||||
|
logger := c.MustGet("LOGGER").(logrus.FieldLogger)
|
||||||
|
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
||||||
|
|
||||||
|
durationKey, exists := c.GetQuery("duration_key")
|
||||||
|
if !exists {
|
||||||
|
durationKey = "week"
|
||||||
|
}
|
||||||
|
|
||||||
|
tempHistory, err := deviceRepo.GetSmartTemperatureHistory(c, durationKey)
|
||||||
|
if err != nil {
|
||||||
|
logger.Errorln("An error occurred while retrieving summary/temp history", err)
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"success": true,
|
||||||
|
"data": map[string]interface{}{
|
||||||
|
"temp_history": tempHistory,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue