Compare commits: main...new-system (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 353d9f8853 | |
| | 6c3c1cae0e | |
| | 402d553f44 | |
3 .gitignore (vendored)
@@ -15,4 +15,5 @@ beszel/build
 *timestamp*
 .swc
 beszel/site/src/locales/**/*.ts
 *.bak
+__debug_*
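(The new `__debug_*` entry presumably covers the temporary `__debug_bin` binaries that the VS Code Go debugger drops in the working directory.)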
@@ -14,6 +14,9 @@ clean:
 lint:
 	golangci-lint run
 
+test:
+	go test -tags=testing ./...
+
 tidy:
 	go mod tidy
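The new `test` target runs the suite with `-tags=testing`, which presumably gates test-only code paths behind a build tag. A minimal sketch of a file that would only compile under `make test` (the package and variable are hypothetical, for illustration):

```go
//go:build testing

package hub

// testHooks is a hypothetical helper that exists only when the code is
// built with `go test -tags=testing ./...`, as in the Makefile target above.
var testHooks = map[string]func(){}
```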
@@ -1,10 +1,46 @@
 package main
 
 import (
+	"beszel"
 	"beszel/internal/hub"
+	_ "beszel/migrations"
+	"os"
+
+	"github.com/pocketbase/pocketbase"
+	"github.com/pocketbase/pocketbase/plugins/migratecmd"
+	"github.com/spf13/cobra"
 )
 
 func main() {
-	hub.NewHub().Run()
+	pocketBase := getPocketBase()
+	h := hub.NewHub(pocketBase)
+	h.Run()
+	h.Start()
 }
+
+// getPocketBase creates a new PocketBase app with the default config
+func getPocketBase() *pocketbase.PocketBase {
+	isDev := os.Getenv("ENV") == "dev"
+
+	pocketBase := pocketbase.NewWithConfig(pocketbase.Config{
+		DefaultDataDir: beszel.AppName + "_data",
+		DefaultDev:     isDev,
+	})
+	pocketBase.RootCmd.Version = beszel.Version
+	pocketBase.RootCmd.Use = beszel.AppName
+	pocketBase.RootCmd.Short = ""
+	// add update command
+	pocketBase.RootCmd.AddCommand(&cobra.Command{
+		Use:   "update",
+		Short: "Update " + beszel.AppName + " to the latest version",
+		Run:   hub.Update,
+	})
+
+	// enable auto creation of migration files when making collection changes in the Admin UI
+	migratecmd.MustRegister(pocketBase, pocketBase.RootCmd, migratecmd.Config{
+		Automigrate: isDev,
+		Dir:         "../../migrations",
+	})
+
+	return pocketBase
+}
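Per this hunk, PocketBase construction moves out of the hub package and `NewHub` now accepts the app instance. One consequence (a sketch, not code from this PR): the hub can be constructed against PocketBase's test app instead of a real `*pocketbase.PocketBase`. The data directory path below is hypothetical.

```go
package hub_test

import (
	"testing"

	"beszel/internal/hub"

	"github.com/pocketbase/pocketbase/tests"
)

func TestNewHubWithTestApp(t *testing.T) {
	// tests.NewTestApp loads a PocketBase app from a test data directory
	app, err := tests.NewTestApp("./test_pb_data")
	if err != nil {
		t.Fatal(err)
	}
	defer app.Cleanup()

	h := hub.NewHub(app) // any core.App works, not just *pocketbase.PocketBase
	if h == nil {
		t.Fatal("expected hub instance")
	}
}
```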
beszel/go.mod
@@ -8,39 +8,39 @@ require (
 	github.com/gliderlabs/ssh v0.3.8
 	github.com/goccy/go-json v0.10.5
 	github.com/pocketbase/dbx v1.11.0
-	github.com/pocketbase/pocketbase v0.25.0
+	github.com/pocketbase/pocketbase v0.25.8
 	github.com/rhysd/go-github-selfupdate v1.2.3
-	github.com/shirou/gopsutil/v4 v4.25.1
+	github.com/shirou/gopsutil/v4 v4.25.2
 	github.com/spf13/cast v1.7.1
-	github.com/spf13/cobra v1.8.1
+	github.com/spf13/cobra v1.9.1
 	github.com/stretchr/testify v1.10.0
-	golang.org/x/crypto v0.32.0
-	golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c
+	golang.org/x/crypto v0.35.0
+	golang.org/x/exp v0.0.0-20250228200357-dead58393ab7
 	gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
 	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.36.1 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.29.6 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.59 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.59 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.6 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.75.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect
-	github.com/aws/smithy-go v1.22.2 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.29.8 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.61 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.64 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 // indirect
+	github.com/aws/smithy-go v1.22.3 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/disintegration/imaging v1.6.2 // indirect
 	github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
@@ -59,7 +59,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.14.1 // indirect
 	github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
+	github.com/lufia/plan9stats v0.0.0-20250224150550-a661cff19cfb // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/ncruces/go-strftime v0.1.9 // indirect
@@ -75,19 +75,18 @@ require (
 	go.opencensus.io v0.24.0 // indirect
 	gocloud.dev v0.40.0 // indirect
 	golang.org/x/image v0.24.0 // indirect
-	golang.org/x/net v0.34.0 // indirect
-	golang.org/x/oauth2 v0.26.0 // indirect
+	golang.org/x/net v0.35.0 // indirect
+	golang.org/x/oauth2 v0.27.0 // indirect
 	golang.org/x/sync v0.11.0 // indirect
 	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/term v0.29.0 // indirect
 	golang.org/x/text v0.22.0 // indirect
 	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
-	google.golang.org/api v0.220.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 // indirect
+	google.golang.org/api v0.223.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect
 	google.golang.org/grpc v1.70.0 // indirect
-	google.golang.org/protobuf v1.36.4 // indirect
-	modernc.org/libc v1.55.3 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
+	modernc.org/libc v1.61.13 // indirect
 	modernc.org/mathutil v1.7.1 // indirect
 	modernc.org/memory v1.8.2 // indirect
-	modernc.org/sqlite v1.34.5 // indirect
+	modernc.org/sqlite v1.35.0 // indirect
 )
183 beszel/go.sum
@@ -1,8 +1,8 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
 cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
-cloud.google.com/go/auth v0.14.1 h1:AwoJbzUdxA/whv1qj3TLKwh3XX5sikny2fc40wUl+h0=
-cloud.google.com/go/auth v0.14.1/go.mod h1:4JHUxlGXisL0AW8kXPtUF6ztuOksyfUQNFjfsOCXkPM=
+cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
+cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
 cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
 cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
 cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
@@ -22,44 +22,44 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
 github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E=
-github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
-github.com/aws/aws-sdk-go-v2/config v1.29.6 h1:fqgqEKK5HaZVWLQoLiC9Q+xDlSp+1LYidp6ybGE2OGg=
-github.com/aws/aws-sdk-go-v2/config v1.29.6/go.mod h1:Ft+WLODzDQmCTHDvqAH1JfC2xxbZ0MxpZAcJqmE1LTQ=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.59 h1:9btwmrt//Q6JcSdgJOLI98sdr5p7tssS9yAsGe8aKP4=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.59/go.mod h1:NM8fM6ovI3zak23UISdWidyZuI1ghNe2xjzUZAyT+08=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.59 h1:5Vsrfdlf9KQP3leGX1dD7VwZq/3HAerEFoXAII4t6zo=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.59/go.mod h1:7XTNs3NYApJjkx6A2Fk9qq23qBuBnIU58k3fKC2Fr1I=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32/go.mod h1:LiBEsDo34OJXqdDlRGsilhlIiXR7DL+6Cx2f4p1EgzI=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.6 h1:cCBJaT7EeEojpJ4s7wTDbhZlHVJOgNHN7iw6qVurGaw=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.6/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.75.4 h1:DJYjOvNgC30JAcDCRmtQHoYK4trc7XetDXRTEAReGKA=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.75.4/go.mod h1:KuLNrwYJFaC2AVZ+CVVc12k9NyqwgWsoNNHjwqF6QNk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc=
-github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
-github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
+github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
+github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
+github.com/aws/aws-sdk-go-v2/config v1.29.8 h1:RpwAfYcV2lr/yRc4lWhUM9JRPQqKgKWmou3LV7UfWP4=
+github.com/aws/aws-sdk-go-v2/config v1.29.8/go.mod h1:t+G7Fq1OcO8cXTPPXzxQSnj/5Xzdc9jAAD3Xrn9/Mgo=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.61 h1:Hd/uX6Wo2iUW1JWII+rmyCD7MMhOe7ALwQXN6sKDd1o=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.61/go.mod h1:L7vaLkwHY1qgW0gG1zG0z/X0sQ5tpIY5iI13+j3qI80=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.64 h1:RTko0AQ0i1vWXDM97DkuW6zskgOxFxm4RqC0kmBJFkE=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.64/go.mod h1:ty968MpOa5CoQ/ALWNB8Gmfoehof2nRHDR/DZDPfimE=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2 h1:t/gZFyrijKuSU0elA5kRngP/oU3mc0I+Dvp8HwRE4c0=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0 h1:EBm8lXevBWe+kK9VOU/IBeOI189WPRwPUc3LvJK9GOs=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0/go.mod h1:4qzsZSzB/KiX2EzDjs9D7A8rI/WGJxZceVJIHqtJjIU=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 h1:2U9sF8nKy7UgyEeLiZTRg6ShBS22z8UnYpV6aRFL0is=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.0/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 h1:wjAdc85cXdQR5uLx5FwWvGIHm4OPJhTyzUHU8craXtE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 h1:BHEK2Q/7CMRMCb3nySi/w8UbIcPhKvYP5s1xf8/izn0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.16/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
+github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
+github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -67,7 +67,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/containrrr/shoutrrr v0.8.0 h1:mfG2ATzIS7NR2Ec6XL+xyoHzN97H8WPjir8aYzJUSec=
 github.com/containrrr/shoutrrr v0.8.0/go.mod h1:ioyQAyu1LJY6sILuNyKaQaw+9Ttik5QePU8atnAdO2o=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -139,8 +139,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/go-github/v30 v30.1.0 h1:VLDx+UolQICEOKu2m4uAoMti1SxuEBAl7RSEG16L+Oo=
 github.com/google/go-github/v30 v30.1.0/go.mod h1:n8jBpHl45a/rlBUtRJMOG4GhNADUQFEufcolZ95JfU8=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -175,8 +175,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
-github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
+github.com/lufia/plan9stats v0.0.0-20250224150550-a661cff19cfb h1:YU0XAr3+rMpM8fP80KEesn32Qa9qkbquokvuwzWyYuA=
+github.com/lufia/plan9stats v0.0.0-20250224150550-a661cff19cfb/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
 github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@@ -195,8 +195,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU=
 github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
-github.com/pocketbase/pocketbase v0.25.0 h1:/4YQq1hd0muvhzbERyUTVNh88N0BCj5diqK0jtLN6k8=
-github.com/pocketbase/pocketbase v0.25.0/go.mod h1:tOtOv7f3vJhAiyUluIwV9JPuKeknZRQ9F6uJE3W/ntI=
+github.com/pocketbase/pocketbase v0.25.8 h1:XGAweEG7T05VkdPbnchrWyCkkrhuZienFgyt2S2VYnA=
+github.com/pocketbase/pocketbase v0.25.8/go.mod h1:gOnPr+g/GS+iqKh5XYXycdRWVGhiHY4c1H4TGjU9DDw=
 github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
 github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -207,13 +207,12 @@ github.com/rhysd/go-github-selfupdate v1.2.3/go.mod h1:mp/N8zj6jFfBQy/XMYoWsmfzx
 github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs=
-github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI=
+github.com/shirou/gopsutil/v4 v4.25.2 h1:NMscG3l2CqtWFS86kj3vP7soOczqrQYIEhO/pMvvQkk=
+github.com/shirou/gopsutil/v4 v4.25.2/go.mod h1:34gBYJzyqCDT11b6bMHP0XCvWeU3J61XRT7a2EmCRTA=
 github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
 github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -240,10 +239,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
 go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
 go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
 go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
@@ -259,19 +258,19 @@ gocloud.dev v0.40.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
+golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c=
+golang.org/x/exp v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
 golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
 golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
-golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
+golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -280,12 +279,12 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
-golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
+golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
-golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -312,20 +311,20 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
 golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
-golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
-golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
+golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
-golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
-google.golang.org/api v0.220.0 h1:3oMI4gdBgB72WFVwE1nerDD8W3HUOS4kypK6rRLbGns=
-google.golang.org/api v0.220.0/go.mod h1:26ZAlY6aN/8WgpCzjPNy18QpYaz7Zgg1h0qe1GkZEmY=
+google.golang.org/api v0.223.0 h1:JUTaWEriXmEy5AhvdMgksGGPEFsYfUKaPEYXd4c3Wvc=
+google.golang.org/api v0.223.0/go.mod h1:C+RS7Z+dDwds2b+zoAk5hN/eSfsiCn0UDrYof/M4d2M=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -337,8 +336,8 @@ google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 h1:CT2Thj5AuPV9phr
 google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc=
 google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
 google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 h1:5bKytslY8ViY0Cj/ewmRtrWHW64bNF03cAatUUFCdFI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@@ -355,8 +354,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
-google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -369,27 +368,27 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
-modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
-modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
-modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
+modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0=
+modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
+modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo=
+modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo=
 modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
 modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
-modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
-modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
-modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
-modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
+modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw=
+modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
+modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8=
+modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E=
 modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
 modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
 modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI=
 modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU=
-modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
-modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
-modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
-modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
-modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
-modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
-modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
-modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
+modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
+modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
+modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
+modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
+modernc.org/sqlite v1.35.0 h1:yQps4fegMnZFdphtzlfQTCNBWtS0CZv48pRpW3RFHRw=
+modernc.org/sqlite v1.35.0/go.mod h1:9cr2sicr7jIaWTBKQmAxQLfBv9LL0su4ZTEV+utt3ic=
+modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
+modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
 modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
 modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
@@ -9,6 +9,7 @@ import (
 	"os"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/shirou/gopsutil/v4/common"
 )
@@ -27,11 +28,17 @@ type Agent struct {
 	sensorsWhitelist map[string]struct{} // List of sensors to monitor
 	systemInfo       system.Info         // Host system info
 	gpuManager       *GPUManager         // Manages GPU data
+	data             *system.CombinedData
+	updated          time.Time
+	sshUser          string
 }
 
+const cacheTime = 60 * time.Second
+
 func NewAgent() *Agent {
 	agent := &Agent{
 		fsStats: make(map[string]*system.FsStats),
+		data:    &system.CombinedData{},
 	}
 	agent.memCalc, _ = GetEnv("MEM_CALC")
@@ -85,7 +92,7 @@ func NewAgent() *Agent {
 
 	// if debugging, print stats
 	if agent.debug {
-		slog.Debug("Stats", "data", agent.gatherStats())
+		slog.Debug("Stats", "data", agent.gatherStats(context.Background()))
 	}
 
 	return agent
@@ -100,29 +107,36 @@ func GetEnv(key string) (value string, exists bool) {
 	return os.LookupEnv(key)
 }
 
-func (a *Agent) gatherStats() system.CombinedData {
+func (a *Agent) gatherStats(ctx context.Context) *system.CombinedData {
 	a.Lock()
 	defer a.Unlock()
+	user := ctx.Value("user").(string)
+	if time.Since(a.updated) < cacheTime && user != a.sshUser && a.data != nil {
+		slog.Info("Using cached stats")
+		return a.data
+	}
 	slog.Debug("Getting stats")
-	systemData := system.CombinedData{
+	*a.data = system.CombinedData{
 		Stats: a.getSystemStats(),
 		Info:  a.systemInfo,
 	}
-	slog.Debug("System stats", "data", systemData)
+	slog.Debug("System stats", "data", a.data)
 	// add docker stats
 	if containerStats, err := a.dockerManager.getDockerStats(); err == nil {
-		systemData.Containers = containerStats
-		slog.Debug("Docker stats", "data", systemData.Containers)
+		a.data.Containers = containerStats
+		slog.Debug("Docker stats", "data", a.data.Containers)
 	} else {
 		slog.Debug("Error getting docker stats", "err", err)
 	}
 	// add extra filesystems
-	systemData.Stats.ExtraFs = make(map[string]*system.FsStats)
+	a.data.Stats.ExtraFs = make(map[string]*system.FsStats)
 	for name, stats := range a.fsStats {
 		if !stats.Root && stats.DiskTotal > 0 {
-			systemData.Stats.ExtraFs[name] = stats
+			a.data.Stats.ExtraFs[name] = stats
 		}
 	}
-	slog.Debug("Extra filesystems", "data", systemData.Stats.ExtraFs)
-	return systemData
+	slog.Debug("Extra filesystems", "data", a.data.Stats.ExtraFs)
+	a.sshUser = user
+	a.updated = time.Now()
+	return a.data
 }
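Note that `gatherStats` now reads the connecting SSH user from the request context (`ctx.Value("user")`) and serves the cached snapshot when a different user connects within `cacheTime`. A minimal, runnable sketch of the context-key pattern with hypothetical names; `go vet`/staticcheck prefer a typed key over the raw string key used in the diff, and the comma-ok assertion avoids a panic when the value is missing:

```go
package main

import (
	"context"
	"fmt"
)

type ctxKey string

const userKey ctxKey = "user"

func main() {
	ctx := context.WithValue(context.Background(), userKey, "hub-1")

	// Mirrors ctx.Value("user").(string) in the diff, with the comma-ok form.
	if user, ok := ctx.Value(userKey).(string); ok {
		fmt.Println("session user:", user)
	}
}
```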
@@ -3,6 +3,7 @@ package agent
 import (
 	"beszel/internal/entities/system"
 	"bufio"
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"os/exec"
@@ -75,7 +76,7 @@ func (c *gpuCollector) collect() error {
 
 	scanner := bufio.NewScanner(stdout)
 	if c.buf == nil {
-		c.buf = make([]byte, 0, 4*1024)
+		c.buf = make([]byte, 0, 16*1024)
 	}
 	scanner.Buffer(c.buf, bufio.MaxScanTokenSize)
 
@@ -110,27 +111,26 @@ func (gm *GPUManager) getJetsonParser() func(output []byte) bool {
 		if !ok {
 			return true
 		}
-		data := string(output)
 		// Parse RAM usage
-		ramMatches := ramPattern.FindStringSubmatch(data)
+		ramMatches := ramPattern.FindSubmatch(output)
 		if ramMatches != nil {
-			gpuData.MemoryUsed, _ = strconv.ParseFloat(ramMatches[1], 64)
-			gpuData.MemoryTotal, _ = strconv.ParseFloat(ramMatches[2], 64)
+			gpuData.MemoryUsed, _ = strconv.ParseFloat(string(ramMatches[1]), 64)
+			gpuData.MemoryTotal, _ = strconv.ParseFloat(string(ramMatches[2]), 64)
 		}
 		// Parse GR3D (GPU) usage
-		gr3dMatches := gr3dPattern.FindStringSubmatch(data)
+		gr3dMatches := gr3dPattern.FindSubmatch(output)
 		if gr3dMatches != nil {
-			gpuData.Usage, _ = strconv.ParseFloat(gr3dMatches[1], 64)
+			gpuData.Usage, _ = strconv.ParseFloat(string(gr3dMatches[1]), 64)
 		}
 		// Parse temperature
-		tempMatches := tempPattern.FindStringSubmatch(data)
+		tempMatches := tempPattern.FindSubmatch(output)
 		if tempMatches != nil {
-			gpuData.Temperature, _ = strconv.ParseFloat(tempMatches[1], 64)
+			gpuData.Temperature, _ = strconv.ParseFloat(string(tempMatches[1]), 64)
 		}
 		// Parse power usage
-		powerMatches := powerPattern.FindStringSubmatch(data)
+		powerMatches := powerPattern.FindSubmatch(output)
 		if powerMatches != nil {
-			power, _ := strconv.ParseFloat(powerMatches[2], 64)
+			power, _ := strconv.ParseFloat(string(powerMatches[2]), 64)
 			gpuData.Power = power / 1000
 		}
 		gpuData.Count++
@@ -142,8 +142,10 @@ func (gm *GPUManager) getJetsonParser() func(output []byte) bool {
 func (gm *GPUManager) parseNvidiaData(output []byte) bool {
 	gm.Lock()
 	defer gm.Unlock()
+	scanner := bufio.NewScanner(bytes.NewReader(output))
 	var valid bool
-	for line := range strings.Lines(string(output)) {
+	for scanner.Scan() {
+		line := scanner.Text() // Or use scanner.Bytes() for []byte
 		fields := strings.Split(strings.TrimSpace(line), ", ")
 		if len(fields) < 7 {
 			continue
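Switching from `FindStringSubmatch` on a converted string to `FindSubmatch` on the raw `[]byte` avoids copying the entire tegrastats output; only the matched capture groups are converted for parsing. A self-contained sketch of that flow, using an illustrative pattern rather than beszel's actual regex:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	output := []byte("RAM 3622/7650MB") // illustrative tegrastats fragment
	ramPattern := regexp.MustCompile(`RAM (\d+)/(\d+)MB`)

	// FindSubmatch works directly on []byte; m is [][]byte
	if m := ramPattern.FindSubmatch(output); m != nil {
		used, _ := strconv.ParseFloat(string(m[1]), 64)
		total, _ := strconv.ParseFloat(string(m[2]), 64)
		fmt.Printf("memory: %.0f / %.0f MB\n", used, total)
	}
}
```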
@@ -1,6 +1,7 @@
 package agent
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"log/slog"
@@ -61,8 +62,9 @@ func (a *Agent) StartServer(opts ServerOptions) error {
 }
 
 func (a *Agent) handleSession(s sshServer.Session) {
-	// slog.Debug("connection", "remoteaddr", s.RemoteAddr(), "user", s.User())
-	stats := a.gatherStats()
+	slog.Debug("New session", "client", s.RemoteAddr(), "key", s.PublicKey().Marshal())
+	ctx := context.WithValue(context.Background(), "user", s.User())
+	stats := a.gatherStats(ctx)
 	if err := json.NewEncoder(s).Encode(stats); err != nil {
 		slog.Error("Error encoding stats", "err", err, "stats", stats)
 		s.Exit(1)
@@ -74,24 +76,18 @@
 // It returns a slice of ssh.PublicKey and an error if any key fails to parse.
 func ParseKeys(input string) ([]ssh.PublicKey, error) {
 	var parsedKeys []ssh.PublicKey
 
 	for line := range strings.Lines(input) {
 		line = strings.TrimSpace(line)
 
 		// Skip empty lines or comments
 		if len(line) == 0 || strings.HasPrefix(line, "#") {
 			continue
 		}
 
 		// Parse the key
 		parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(line))
 		if err != nil {
 			return nil, fmt.Errorf("failed to parse key: %s, error: %w", line, err)
 		}
 
 		// Append the parsed key to the list
 		parsedKeys = append(parsedKeys, parsedKey)
 	}
 
 	return parsedKeys, nil
 }
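`ParseKeys` iterates with `strings.Lines`, added in Go 1.24: it yields each newline-terminated line including its trailing `\n`, which is why the function trims before checking for comments or empty lines. A runnable sketch of that loop shape with placeholder key material:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	input := "ssh-ed25519 AAAA... hub\n# comment\n\nssh-rsa BBBB... backup\n"

	// strings.Lines (Go 1.24) returns an iterator over lines,
	// each still carrying its trailing newline
	for line := range strings.Lines(input) {
		line = strings.TrimSpace(line)
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		fmt.Println("would parse:", line)
	}
}
```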
@@ -26,8 +26,7 @@ type alertInfo {
 // startWorker is a long-running goroutine that processes alert tasks
 // every x seconds. It must be running to process status alerts.
 func (am *AlertManager) startWorker() {
-	// no special reason for 13 seconds
-	tick := time.Tick(13 * time.Second)
+	tick := time.Tick(15 * time.Second)
 	for {
 		select {
 		case <-am.stopChan:
@@ -64,21 +63,12 @@
 }
 
 // HandleStatusAlerts manages the logic when system status changes.
-func (am *AlertManager) HandleStatusAlerts(newStatus string, oldSystemRecord *core.Record) error {
-	switch newStatus {
-	case "up":
-		if oldSystemRecord.GetString("status") != "down" {
-			return nil
-		}
-	case "down":
-		if oldSystemRecord.GetString("status") != "up" {
-			return nil
-		}
-	default:
+func (am *AlertManager) HandleStatusAlerts(newStatus string, systemRecord *core.Record) error {
+	if newStatus != "up" && newStatus != "down" {
 		return nil
 	}
 
-	alertRecords, err := am.getSystemStatusAlerts(oldSystemRecord.Id)
+	alertRecords, err := am.getSystemStatusAlerts(systemRecord.Id)
 	if err != nil {
 		return err
 	}
@@ -86,7 +76,7 @@ func (am *AlertManager) HandleStatusAlerts(newStatus string, oldSystemRecord *co
 		return nil
 	}
 
-	systemName := oldSystemRecord.GetString("name")
+	systemName := systemRecord.GetString("name")
 	if newStatus == "down" {
 		am.handleSystemDown(systemName, alertRecords)
 	} else {
@@ -15,7 +15,7 @@ import (
 	"github.com/spf13/cast"
 )
 
-func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, systemInfo system.Info, temperatures map[string]float64, extraFs map[string]*system.FsStats) error {
+func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error {
 	alertRecords, err := am.app.FindAllRecords("alerts",
 		dbx.NewExp("system={:system}", dbx.Params{"system": systemRecord.Id}),
 	)
@@ -35,15 +35,15 @@
 
 		switch name {
 		case "CPU":
-			val = systemInfo.Cpu
+			val = data.Info.Cpu
 		case "Memory":
-			val = systemInfo.MemPct
+			val = data.Info.MemPct
 		case "Bandwidth":
-			val = systemInfo.Bandwidth
+			val = data.Info.Bandwidth
 			unit = " MB/s"
 		case "Disk":
-			maxUsedPct := systemInfo.DiskPct
-			for _, fs := range extraFs {
+			maxUsedPct := data.Info.DiskPct
+			for _, fs := range data.Stats.ExtraFs {
 				usedPct := fs.DiskUsed / fs.DiskTotal * 100
 				if usedPct > maxUsedPct {
 					maxUsedPct = usedPct
@@ -51,10 +51,10 @@
 			}
 			val = maxUsedPct
 		case "Temperature":
-			if temperatures == nil {
+			if data.Stats.Temperatures == nil {
 				continue
 			}
-			for _, temp := range temperatures {
+			for _, temp := range data.Stats.Temperatures {
 				if temp > val {
 					val = temp
 				}
@@ -111,7 +111,7 @@
 		)).
 		OrderBy("created").
 		All(&systemStats)
-	if err != nil {
+	if err != nil || len(systemStats) == 0 {
 		return err
 	}
 
@@ -163,7 +163,7 @@
 			alert.val += stats.NetSent + stats.NetRecv
 		case "Disk":
 			if alert.mapSums == nil {
-				alert.mapSums = make(map[string]float32, len(extraFs)+1)
+				alert.mapSums = make(map[string]float32, len(data.Stats.ExtraFs)+1)
 			}
 			// add root disk
 			if _, ok := alert.mapSums["root"]; !ok {
@@ -171,7 +171,7 @@
 			}
 			alert.mapSums["root"] += float32(stats.Disk)
 			// add extra disks
-			for key, fs := range data.Stats.ExtraFs {
+			for key, fs := range data.Stats.ExtraFs {
 				if _, ok := alert.mapSums[key]; !ok {
 					alert.mapSums[key] = 0.0
 				}
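Collapsing the separate `systemInfo` / `temperatures` / `extraFs` parameters into one `*system.CombinedData` means future alert types can read additional fields without changing the signature again. A trimmed illustration of that parameter-object shape, using stand-in types rather than beszel's real entities:

```go
package main

import "fmt"

// Trimmed stand-ins for beszel's entities, for illustration only.
type Info struct{ Cpu, MemPct float64 }

type Stats struct{ Temperatures map[string]float64 }

type CombinedData struct {
	Info  Info
	Stats Stats
}

// handleSystemAlerts mirrors the refactored signature: one parameter
// object instead of unpacked info/temperature/filesystem arguments.
func handleSystemAlerts(data *CombinedData) {
	var maxTemp float64
	for _, t := range data.Stats.Temperatures {
		if t > maxTemp {
			maxTemp = t
		}
	}
	fmt.Println("cpu:", data.Info.Cpu, "max temp:", maxTemp)
}

func main() {
	handleSystemAlerts(&CombinedData{
		Info:  Info{Cpu: 12.5, MemPct: 40.2},
		Stats: Stats{Temperatures: map[string]float64{"cpu": 51.0}},
	})
}
```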
@@ -4,67 +4,52 @@ package hub
|
||||
import (
|
||||
"beszel"
|
||||
"beszel/internal/alerts"
|
||||
"beszel/internal/entities/system"
|
||||
"beszel/internal/records"
|
||||
"beszel/internal/sysmanager"
|
||||
"beszel/internal/users"
|
||||
"beszel/site"
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
"github.com/pocketbase/pocketbase"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/plugins/migratecmd"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type Hub struct {
|
||||
*pocketbase.PocketBase
|
||||
sshClientConfig *ssh.ClientConfig
|
||||
pubKey string
|
||||
am *alerts.AlertManager
|
||||
um *users.UserManager
|
||||
rm *records.RecordManager
|
||||
systemStats *core.Collection
|
||||
containerStats *core.Collection
|
||||
appURL string
|
||||
core.App
|
||||
*alerts.AlertManager
|
||||
pubKey string
|
||||
um *users.UserManager
|
||||
rm *records.RecordManager
|
||||
sm *sysmanager.SystemManager
|
||||
appURL string
|
||||
}
|
||||
|
||||
// NewHub creates a new Hub instance with default configuration
|
||||
func NewHub() *Hub {
|
||||
var hub Hub
|
||||
hub.PocketBase = pocketbase.NewWithConfig(pocketbase.Config{
|
||||
DefaultDataDir: beszel.AppName + "_data",
|
||||
})
|
||||
func NewHub(app core.App) *Hub {
|
||||
hub := &Hub{}
|
||||
hub.App = app
|
||||
|
||||
hub.RootCmd.Version = beszel.Version
|
||||
hub.RootCmd.Use = beszel.AppName
|
||||
hub.RootCmd.Short = ""
|
||||
// add update command
|
||||
hub.RootCmd.AddCommand(&cobra.Command{
|
||||
Use: "update",
|
||||
Short: "Update " + beszel.AppName + " to the latest version",
|
||||
Run: Update,
|
||||
})
|
||||
|
||||
hub.am = alerts.NewAlertManager(hub)
|
||||
hub.AlertManager = alerts.NewAlertManager(hub)
|
||||
hub.um = users.NewUserManager(hub)
|
||||
hub.rm = records.NewRecordManager(hub)
|
||||
sm, err := sysmanager.NewSystemManager(hub)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
hub.sm = sm
|
||||
hub.appURL, _ = GetEnv("APP_URL")
|
||||
return &hub
|
||||
return hub
|
||||
}
|
||||
|
||||
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
||||
@@ -76,23 +61,10 @@ func GetEnv(key string) (value string, exists bool) {
|
||||
return os.LookupEnv(key)
|
||||
}
|
||||
|
||||
func (h *Hub) Run() {
|
||||
isDev := os.Getenv("ENV") == "dev"
|
||||
|
||||
// enable auto creation of migration files when making collection changes in the Admin UI
migratecmd.MustRegister(h, h.RootCmd, migratecmd.Config{
    // (the isDev check is to enable it only during development)
    Automigrate: isDev,
    Dir:         "../../migrations",
})

func (h *Hub) Run() *Hub {
    // initial setup
    h.OnServe().BindFunc(func(se *core.ServeEvent) error {
        // create ssh client config
        err := h.createSSHClientConfig()
        if err != nil {
            log.Fatal(err)
        }
    h.OnBootstrap().BindFunc(func(be *core.BootstrapEvent) error {
        be.Next()
        // set general settings
        settings := h.Settings()
        // batch requests (for global alerts)
@@ -126,12 +98,14 @@ func (h *Hub) Run() {
        }
        // sync systems with config
        h.syncSystemsWithConfig()
        return se.Next()
        // start system updates
        h.sm.Initialize()
        return nil
    })

    // serve web ui
    h.OnServe().BindFunc(func(se *core.ServeEvent) error {
        switch isDev {
        switch h.IsDev() {
        case true:
            proxy := httputil.NewSingleHostReverseProxy(&url.URL{
                Scheme: "http",
@@ -175,19 +149,13 @@ func (h *Hub) Run() {
        return se.Next()
    })

    // set up scheduled jobs / ticker for system updates
    // set up scheduled jobs
    h.OnServe().BindFunc(func(se *core.ServeEvent) error {
        // 15 second ticker for system updates
        go h.startSystemUpdateTicker()
        // set up cron jobs
        // delete old records once every hour
        h.Cron().MustAdd("delete old records", "8 * * * *", h.rm.DeleteOldRecords)
        // create longer records every 10 minutes
        h.Cron().MustAdd("create longer records", "*/10 * * * *", func() {
            if systemStats, containerStats, err := h.getCollections(); err == nil {
                h.rm.CreateLongerRecords([]*core.Collection{systemStats, containerStats})
            }
        })
        h.Cron().MustAdd("create longer records", "*/10 * * * *", h.rm.CreateLongerRecords)
        return se.Next()
    })

@@ -207,7 +175,7 @@ func (h *Hub) Run() {
            return e.JSON(http.StatusOK, map[string]bool{"firstRun": err == nil && total == 0})
        })
        // send test notification
        se.Router.GET("/api/beszel/send-test-notification", h.am.SendTestNotification)
        se.Router.GET("/api/beszel/send-test-notification", h.SendTestNotification)
        // API endpoint to get config.yml content
        se.Router.GET("/api/beszel/config-yaml", h.getYamlConfig)
        // create first user endpoint only needed if no users exist
@@ -217,303 +185,23 @@ func (h *Hub) Run() {
        return se.Next()
    })

    // system creation defaults
    h.OnRecordCreate("systems").BindFunc(func(e *core.RecordEvent) error {
        e.Record.Set("info", system.Info{})
        e.Record.Set("status", "pending")
        return e.Next()
    })

    // immediately create connection for new systems
    h.OnRecordAfterCreateSuccess("systems").BindFunc(func(e *core.RecordEvent) error {
        go h.updateSystem(e.Record)
        return e.Next()
    })

    // handle default values for user / user_settings creation
    h.OnRecordCreate("users").BindFunc(h.um.InitializeUserRole)
    h.OnRecordCreate("user_settings").BindFunc(h.um.InitializeUserSettings)

    // empty info for systems that are paused
    h.OnRecordUpdate("systems").BindFunc(func(e *core.RecordEvent) error {
        if e.Record.GetString("status") == "paused" {
            e.Record.Set("info", system.Info{})
        }
        return e.Next()
    })

    // do things after a systems record is updated
    h.OnRecordAfterUpdateSuccess("systems").BindFunc(func(e *core.RecordEvent) error {
        newRecord := e.Record.Fresh()
        oldRecord := newRecord.Original()
        newStatus := newRecord.GetString("status")

        // if system is not up and connection exists, remove it
        if newStatus != "up" {
            h.deleteSystemConnection(newRecord)
        }

        // if system is set to pending (unpause), try to connect immediately
        if newStatus == "pending" {
            go h.updateSystem(newRecord)
        } else {
            h.am.HandleStatusAlerts(newStatus, oldRecord)
        }
        return e.Next()
    })

    // if system is deleted, close connection
    h.OnRecordAfterDeleteSuccess("systems").BindFunc(func(e *core.RecordEvent) error {
        h.deleteSystemConnection(e.Record)
        return e.Next()
    })

    if err := h.Start(); err != nil {
        log.Fatal(err)
    }
    return h
}

func (h *Hub) startSystemUpdateTicker() {
    c := time.Tick(15 * time.Second)
    for range c {
        h.updateSystems()
// Start starts the Hub server
func (h *Hub) Start() error {
    // Use type assertion to access the Start method
    if pb, ok := h.App.(*pocketbase.PocketBase); ok {
        return pb.Start()
    }
    return fmt.Errorf("unable to start: App is not *pocketbase.PocketBase")
}

func (h *Hub) updateSystems() {
    records, err := h.FindRecordsByFilter(
        "2hz5ncl8tizk5nx",    // systems collection
        "status != 'paused'", // filter
        "updated",            // sort
        -1,                   // limit
        0,                    // offset
    )
    // log.Println("records", len(records))
    if err != nil || len(records) == 0 {
        // h.Logger().Error("Failed to query systems")
        return
    }
    fiftySecondsAgo := time.Now().UTC().Add(-50 * time.Second)
    batchSize := len(records)/4 + 1
    done := 0
    for _, record := range records {
        // break if batch size reached or if the system was updated less than 50 seconds ago
        if done >= batchSize || record.GetDateTime("updated").Time().After(fiftySecondsAgo) {
            break
        }
        // don't increment for down systems to avoid them jamming the queue
        // because they're always first when sorted by least recently updated
        if record.GetString("status") != "down" {
            done++
        }
        go h.updateSystem(record)
    }
}
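
With a 15-second ticker and a batch of roughly a quarter of the fleet per tick, each system ends up polled about once a minute, and the 50-second guard prevents double-polling. A minimal sketch of that cadence math, with illustrative fleet sizes rather than anything taken from the codebase:

package main

import "fmt"

func main() {
    const tickSeconds = 15
    for _, fleet := range []int{1, 10, 100} {
        batch := fleet/4 + 1 // same formula as updateSystems
        ticksPerSweep := (fleet + batch - 1) / batch
        fmt.Printf("%d systems -> %d per tick, full sweep every ~%ds\n",
            fleet, batch, ticksPerSweep*tickSeconds)
    }
}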

func (h *Hub) updateSystem(record *core.Record) {
    var client *ssh.Client
    var err error

    // check if system connection exists
    if existingClient, ok := h.Store().GetOk(record.Id); ok {
        client = existingClient.(*ssh.Client)
    } else {
        // create system connection
        client, err = h.createSystemConnection(record)
        if err != nil {
            if record.GetString("status") != "down" {
                h.Logger().Error("Failed to connect:", "err", err.Error(), "system", record.GetString("host"), "port", record.GetString("port"))
                h.updateSystemStatus(record, "down")
            }
            return
        }
        h.Store().Set(record.Id, client)
    }
    // get system stats from agent
    var systemData system.CombinedData
    if err := h.requestJsonFromAgent(client, &systemData); err != nil {
        if err.Error() == "bad client" {
            // if previous connection was closed, try again
            h.Logger().Error("Existing SSH connection closed. Retrying...", "host", record.GetString("host"), "port", record.GetString("port"))
            h.deleteSystemConnection(record)
            time.Sleep(time.Millisecond * 100)
            h.updateSystem(record)
            return
        }
        h.Logger().Error("Failed to get system stats: ", "err", err.Error())
        h.updateSystemStatus(record, "down")
        return
    }
    // update system record
    record.Set("status", "up")
    record.Set("info", systemData.Info)
    if err := h.SaveNoValidate(record); err != nil {
        h.Logger().Error("Failed to update record: ", "err", err.Error())
    }
    // add system_stats and container_stats records
    if systemStats, containerStats, err := h.getCollections(); err != nil {
        h.Logger().Error("Failed to get collections: ", "err", err.Error())
    } else {
        // add new system_stats record
        systemStatsRecord := core.NewRecord(systemStats)
        systemStatsRecord.Set("system", record.Id)
        systemStatsRecord.Set("stats", systemData.Stats)
        systemStatsRecord.Set("type", "1m")
        if err := h.SaveNoValidate(systemStatsRecord); err != nil {
            h.Logger().Error("Failed to save record: ", "err", err.Error())
        }
        // add new container_stats record
        if len(systemData.Containers) > 0 {
            containerStatsRecord := core.NewRecord(containerStats)
            containerStatsRecord.Set("system", record.Id)
            containerStatsRecord.Set("stats", systemData.Containers)
            containerStatsRecord.Set("type", "1m")
            if err := h.SaveNoValidate(containerStatsRecord); err != nil {
                h.Logger().Error("Failed to save record: ", "err", err.Error())
            }
        }
    }

    // system info alerts
    if err := h.am.HandleSystemAlerts(record, systemData.Info, systemData.Stats.Temperatures, systemData.Stats.ExtraFs); err != nil {
        h.Logger().Error("System alerts error", "err", err.Error())
    }
}

// return system_stats and container_stats collections
func (h *Hub) getCollections() (*core.Collection, *core.Collection, error) {
    if h.systemStats == nil {
        systemStats, err := h.FindCollectionByNameOrId("system_stats")
        if err != nil {
            return nil, nil, err
        }
        h.systemStats = systemStats
    }
    if h.containerStats == nil {
        containerStats, err := h.FindCollectionByNameOrId("container_stats")
        if err != nil {
            return nil, nil, err
        }
        h.containerStats = containerStats
    }
    return h.systemStats, h.containerStats, nil
}

// set system to specified status and save record
func (h *Hub) updateSystemStatus(record *core.Record, status string) {
    if record.Fresh().GetString("status") != status {
        record.Set("status", status)
        if err := h.SaveNoValidate(record); err != nil {
            h.Logger().Error("Failed to update record: ", "err", err.Error())
        }
    }
}

// delete system connection from map and close connection
func (h *Hub) deleteSystemConnection(record *core.Record) {
    if client, ok := h.Store().GetOk(record.Id); ok {
        if sshClient := client.(*ssh.Client); sshClient != nil {
            sshClient.Close()
        }
        h.Store().Remove(record.Id)
    }
}

func (h *Hub) createSystemConnection(record *core.Record) (*ssh.Client, error) {
    network := "tcp"
    host := record.GetString("host")
    if strings.HasPrefix(host, "/") {
        network = "unix"
    } else {
        host = net.JoinHostPort(host, record.GetString("port"))
    }
    client, err := ssh.Dial(network, host, h.sshClientConfig)
    if err != nil {
        return nil, err
    }
    return client, nil
}
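
The host field does double duty here: a leading "/" is treated as a Unix socket path, anything else as a TCP host that gets joined with the port. A small standalone sketch of that selection logic (inputs are hypothetical):

package main

import (
    "fmt"
    "net"
    "strings"
)

func dialTarget(host, port string) (network, addr string) {
    if strings.HasPrefix(host, "/") {
        return "unix", host // e.g. /run/agent.sock
    }
    return "tcp", net.JoinHostPort(host, port) // handles IPv6 brackets too
}

func main() {
    fmt.Println(dialTarget("192.168.1.5", "45876")) // tcp 192.168.1.5:45876
    fmt.Println(dialTarget("::1", "45876"))         // tcp [::1]:45876
    fmt.Println(dialTarget("/run/agent.sock", ""))  // unix /run/agent.sock
}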

func (h *Hub) createSSHClientConfig() error {
    key, err := h.getSSHKey()
    if err != nil {
        h.Logger().Error("Failed to get SSH key: ", "err", err.Error())
        return err
    }

    // Create the Signer for this private key.
    signer, err := ssh.ParsePrivateKey(key)
    if err != nil {
        return err
    }

    h.sshClientConfig = &ssh.ClientConfig{
        User: "u",
        Auth: []ssh.AuthMethod{
            ssh.PublicKeys(signer),
        },
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
        Timeout:         4 * time.Second,
    }
    return nil
}

// Fetches system stats from the agent and decodes the json data into the provided struct
func (h *Hub) requestJsonFromAgent(client *ssh.Client, systemData *system.CombinedData) error {
    session, err := newSessionWithTimeout(client, 4*time.Second)
    if err != nil {
        return fmt.Errorf("bad client")
    }
    defer session.Close()

    stdout, err := session.StdoutPipe()
    if err != nil {
        return err
    }

    if err := session.Shell(); err != nil {
        return err
    }

    if err := json.NewDecoder(stdout).Decode(systemData); err != nil {
        return err
    }

    // wait for the session to complete
    if err := session.Wait(); err != nil {
        return err
    }

    return nil
}

// Adds timeout to SSH session creation to avoid hanging in case of network issues
func newSessionWithTimeout(client *ssh.Client, timeout time.Duration) (*ssh.Session, error) {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()

    // use goroutine to create the session
    sessionChan := make(chan *ssh.Session, 1)
    errChan := make(chan error, 1)
    go func() {
        if session, err := client.NewSession(); err != nil {
            errChan <- err
        } else {
            sessionChan <- session
        }
    }()

    select {
    case session := <-sessionChan:
        return session, nil
    case err := <-errChan:
        return nil, err
    case <-ctx.Done():
        return nil, fmt.Errorf("session creation timed out")
    }
}
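
The channel-plus-select shape above generalizes to any blocking call that lacks a context-aware variant. A hedged, generic sketch of the same pattern (not part of the codebase; names are illustrative):

package timeoututil // hypothetical helper package

import (
    "fmt"
    "time"
)

// CallWithTimeout runs fn in a goroutine and gives up after timeout.
// Note: if the timeout fires first, the goroutine running fn leaks until
// fn returns — the same trade-off newSessionWithTimeout accepts above.
func CallWithTimeout[T any](timeout time.Duration, fn func() (T, error)) (T, error) {
    done := make(chan struct{})
    var val T
    var err error
    go func() {
        val, err = fn()
        close(done)
    }()
    select {
    case <-done:
        return val, err
    case <-time.After(timeout):
        var zero T
        return zero, fmt.Errorf("timed out after %s", timeout)
    }
}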

func (h *Hub) getSSHKey() ([]byte, error) {
func (h *Hub) GetSSHKey() ([]byte, error) {
    dataDir := h.DataDir()
    // check if the key pair already exists
    existingKey, err := os.ReadFile(dataDir + "/id_ed25519")

@@ -4,14 +4,15 @@ package records
import (
    "beszel/internal/entities/container"
    "beszel/internal/entities/system"
    "fmt"
    "log"
    "math"
    "strings"
    "time"

    "github.com/goccy/go-json"
    "github.com/pocketbase/dbx"
    "github.com/pocketbase/pocketbase/core"
    "github.com/pocketbase/pocketbase/tools/types"
)

type RecordManager struct {
@@ -25,10 +26,10 @@ type LongerRecordData struct {
    minShorterRecords int
}

type RecordDeletionData struct {
    recordType string
    retention  time.Duration
}
// type RecordDeletionData struct {
//     recordType string
//     retention  time.Duration
// }

type RecordStats []struct {
    Stats []byte `db:"stats"`
@@ -39,7 +40,7 @@ func NewRecordManager(app core.App) *RecordManager {
}

// Create longer records by averaging shorter records
func (rm *RecordManager) CreateLongerRecords(collections []*core.Collection) {
func (rm *RecordManager) CreateLongerRecords() {
    // start := time.Now()
    longerRecordData := []LongerRecordData{
        {
@@ -70,14 +71,24 @@ func (rm *RecordManager) CreateLongerRecords(collections []*core.Collection) {
    }
    // wrap the operations in a transaction
    rm.app.RunInTransaction(func(txApp core.App) error {
        activeSystems, err := txApp.FindAllRecords("systems", dbx.NewExp("status = 'up'"))
        var err error
        collections := [2]*core.Collection{}
        collections[0], err = txApp.FindCachedCollectionByNameOrId("system_stats")
        if err != nil {
            log.Println("failed to get active systems", "err", err.Error())
            return err
        }
        collections[1], err = txApp.FindCachedCollectionByNameOrId("container_stats")
        if err != nil {
            return err
        }
        var systems []struct {
            Id string `db:"id"`
        }

        txApp.DB().NewQuery("SELECT id FROM systems WHERE status='up'").All(&systems)

        // loop through all active systems, time periods, and collections
        for _, system := range activeSystems {
        for _, system := range systems {
            // log.Println("processing system", system.GetString("name"))
            for i := range longerRecordData {
                recordData := longerRecordData[i]
@@ -92,7 +103,7 @@ func (rm *RecordManager) CreateLongerRecords(collections []*core.Collection) {
                if recordData.longerType != "10m" {
                    lastLongerRecord, err := txApp.FindFirstRecordByFilter(
                        collection.Id,
                        "type = {:type} && system = {:system} && created > {:created}",
                        "system = {:system} && type = {:type} && created > {:created}",
                        dbx.Params{"type": recordData.longerType, "system": system.Id, "created": longerRecordPeriod},
                    )
                    // continue if longer record exists
@@ -108,7 +119,7 @@ func (rm *RecordManager) CreateLongerRecords(collections []*core.Collection) {
                    Select("stats").
                    From(collection.Name).
                    AndWhere(dbx.NewExp(
                        "type={:type} AND system={:system} AND created > {:created}",
                        "system={:system} AND type={:type} AND created > {:created}",
                        dbx.Params{
                            "type":   recordData.shorterType,
                            "system": system.Id,
@@ -119,7 +130,6 @@ func (rm *RecordManager) CreateLongerRecords(collections []*core.Collection) {

                // continue if not enough shorter records
                if err != nil || len(stats) < recordData.minShorterRecords {
                    // log.Println("not enough shorter records. continue.", len(allShorterRecords), recordData.expectedShorterRecords)
                    continue
                }
                // average the shorter records and create longer record
@@ -133,7 +143,7 @@ func (rm *RecordManager) CreateLongerRecords(collections []*core.Collection) {
                    longerRecord.Set("stats", rm.AverageContainerStats(stats))
                }
                if err := txApp.SaveNoValidate(longerRecord); err != nil {
                    log.Println("failed to save longer record", "err", err.Error())
                    log.Println("failed to save longer record", "err", err)
                }
            }
        }
@@ -146,16 +156,20 @@ func (rm *RecordManager) CreateLongerRecords(collections []*core.Collection) {
}

// Calculate the average stats of a list of system_stats records without reflect
func (rm *RecordManager) AverageSystemStats(records RecordStats) system.Stats {
    sum := system.Stats{}
func (rm *RecordManager) AverageSystemStats(records RecordStats) *system.Stats {
    sum := &system.Stats{}
    count := float64(len(records))
    // use different counter for temps in case some records don't have them
    tempCount := float64(0)

    var stats system.Stats
    // Temporary struct for unmarshaling
    stats := &system.Stats{}

    // Accumulate totals
    for i := range records {
        stats = system.Stats{} // Zero the struct before unmarshalling
        json.Unmarshal(records[i].Stats, &stats)
        *stats = system.Stats{} // Reset the struct before unmarshaling
        if err := json.Unmarshal(records[i].Stats, stats); err != nil {
            continue
        }
        sum.Cpu += stats.Cpu
        sum.Mem += stats.Mem
        sum.MemUsed += stats.MemUsed
@@ -171,26 +185,25 @@ func (rm *RecordManager) AverageSystemStats(records RecordStats) system.Stats {
        sum.DiskWritePs += stats.DiskWritePs
        sum.NetworkSent += stats.NetworkSent
        sum.NetworkRecv += stats.NetworkRecv
        // set peak values
        // Set peak values
        sum.MaxCpu = max(sum.MaxCpu, stats.MaxCpu, stats.Cpu)
        sum.MaxNetworkSent = max(sum.MaxNetworkSent, stats.MaxNetworkSent, stats.NetworkSent)
        sum.MaxNetworkRecv = max(sum.MaxNetworkRecv, stats.MaxNetworkRecv, stats.NetworkRecv)
        sum.MaxDiskReadPs = max(sum.MaxDiskReadPs, stats.MaxDiskReadPs, stats.DiskReadPs)
        sum.MaxDiskWritePs = max(sum.MaxDiskWritePs, stats.MaxDiskWritePs, stats.DiskWritePs)
        // add temps to sum

        // Accumulate temperatures
        if stats.Temperatures != nil {
            if sum.Temperatures == nil {
                sum.Temperatures = make(map[string]float64, len(stats.Temperatures))
            }
            tempCount++
            for key, value := range stats.Temperatures {
                if _, ok := sum.Temperatures[key]; !ok {
                    sum.Temperatures[key] = 0
                }
                sum.Temperatures[key] += value
            }
        }
        // add extra fs to sum

        // Accumulate extra filesystem stats
        if stats.ExtraFs != nil {
            if sum.ExtraFs == nil {
                sum.ExtraFs = make(map[string]*system.FsStats, len(stats.ExtraFs))
@@ -199,25 +212,26 @@ func (rm *RecordManager) AverageSystemStats(records RecordStats) system.Stats {
                if _, ok := sum.ExtraFs[key]; !ok {
                    sum.ExtraFs[key] = &system.FsStats{}
                }
                sum.ExtraFs[key].DiskTotal += value.DiskTotal
                sum.ExtraFs[key].DiskUsed += value.DiskUsed
                sum.ExtraFs[key].DiskWritePs += value.DiskWritePs
                sum.ExtraFs[key].DiskReadPs += value.DiskReadPs
                // peak values
                sum.ExtraFs[key].MaxDiskReadPS = max(sum.ExtraFs[key].MaxDiskReadPS, value.MaxDiskReadPS, value.DiskReadPs)
                sum.ExtraFs[key].MaxDiskWritePS = max(sum.ExtraFs[key].MaxDiskWritePS, value.MaxDiskWritePS, value.DiskWritePs)
                fs := sum.ExtraFs[key]
                fs.DiskTotal += value.DiskTotal
                fs.DiskUsed += value.DiskUsed
                fs.DiskWritePs += value.DiskWritePs
                fs.DiskReadPs += value.DiskReadPs
                fs.MaxDiskReadPS = max(fs.MaxDiskReadPS, value.MaxDiskReadPS, value.DiskReadPs)
                fs.MaxDiskWritePS = max(fs.MaxDiskWritePS, value.MaxDiskWritePS, value.DiskWritePs)
            }
        }
        // add GPU data

        // Accumulate GPU data
        if stats.GPUData != nil {
            if sum.GPUData == nil {
                sum.GPUData = make(map[string]system.GPUData, len(stats.GPUData))
            }
            for id, value := range stats.GPUData {
                if _, ok := sum.GPUData[id]; !ok {
                    sum.GPUData[id] = system.GPUData{Name: value.Name}
                gpu, ok := sum.GPUData[id]
                if !ok {
                    gpu = system.GPUData{Name: value.Name}
                }
                gpu := sum.GPUData[id]
                gpu.Temperature += value.Temperature
                gpu.MemoryUsed += value.MemoryUsed
                gpu.MemoryTotal += value.MemoryTotal
@@ -229,76 +243,67 @@ func (rm *RecordManager) AverageSystemStats(records RecordStats) system.Stats {
        }
    }

    stats = system.Stats{
        Cpu:          twoDecimals(sum.Cpu / count),
        Mem:          twoDecimals(sum.Mem / count),
        MemUsed:      twoDecimals(sum.MemUsed / count),
        MemPct:       twoDecimals(sum.MemPct / count),
        MemBuffCache: twoDecimals(sum.MemBuffCache / count),
        MemZfsArc:    twoDecimals(sum.MemZfsArc / count),
        Swap:         twoDecimals(sum.Swap / count),
        SwapUsed:     twoDecimals(sum.SwapUsed / count),
        DiskTotal:    twoDecimals(sum.DiskTotal / count),
        DiskUsed:     twoDecimals(sum.DiskUsed / count),
        DiskPct:      twoDecimals(sum.DiskPct / count),
        DiskReadPs:   twoDecimals(sum.DiskReadPs / count),
        DiskWritePs:  twoDecimals(sum.DiskWritePs / count),
        NetworkSent:  twoDecimals(sum.NetworkSent / count),
        NetworkRecv:  twoDecimals(sum.NetworkRecv / count),
        MaxCpu:         sum.MaxCpu,
        MaxDiskReadPs:  sum.MaxDiskReadPs,
        MaxDiskWritePs: sum.MaxDiskWritePs,
        MaxNetworkSent: sum.MaxNetworkSent,
        MaxNetworkRecv: sum.MaxNetworkRecv,
    }
    // Compute averages in place
    if count > 0 {
        sum.Cpu = twoDecimals(sum.Cpu / count)
        sum.Mem = twoDecimals(sum.Mem / count)
        sum.MemUsed = twoDecimals(sum.MemUsed / count)
        sum.MemPct = twoDecimals(sum.MemPct / count)
        sum.MemBuffCache = twoDecimals(sum.MemBuffCache / count)
        sum.MemZfsArc = twoDecimals(sum.MemZfsArc / count)
        sum.Swap = twoDecimals(sum.Swap / count)
        sum.SwapUsed = twoDecimals(sum.SwapUsed / count)
        sum.DiskTotal = twoDecimals(sum.DiskTotal / count)
        sum.DiskUsed = twoDecimals(sum.DiskUsed / count)
        sum.DiskPct = twoDecimals(sum.DiskPct / count)
        sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
        sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
        sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
        sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)

        if sum.Temperatures != nil {
            stats.Temperatures = make(map[string]float64, len(sum.Temperatures))
            for key, value := range sum.Temperatures {
                stats.Temperatures[key] = twoDecimals(value / tempCount)
        // Average temperatures
        if sum.Temperatures != nil && tempCount > 0 {
            for key := range sum.Temperatures {
                sum.Temperatures[key] = twoDecimals(sum.Temperatures[key] / tempCount)
            }
        }

        if sum.ExtraFs != nil {
            stats.ExtraFs = make(map[string]*system.FsStats, len(sum.ExtraFs))
            for key, value := range sum.ExtraFs {
                stats.ExtraFs[key] = &system.FsStats{
                    DiskTotal:   twoDecimals(value.DiskTotal / count),
                    DiskUsed:    twoDecimals(value.DiskUsed / count),
                    DiskWritePs: twoDecimals(value.DiskWritePs / count),
                    DiskReadPs:  twoDecimals(value.DiskReadPs / count),
                    MaxDiskReadPS:  value.MaxDiskReadPS,
                    MaxDiskWritePS: value.MaxDiskWritePS,
        // Average extra filesystem stats
        if sum.ExtraFs != nil {
            for key := range sum.ExtraFs {
                fs := sum.ExtraFs[key]
                fs.DiskTotal = twoDecimals(fs.DiskTotal / count)
                fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
                fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
                fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
            }
        }

        // Average GPU data
        if sum.GPUData != nil {
            for id := range sum.GPUData {
                gpu := sum.GPUData[id]
                gpu.Temperature = twoDecimals(gpu.Temperature / count)
                gpu.MemoryUsed = twoDecimals(gpu.MemoryUsed / count)
                gpu.MemoryTotal = twoDecimals(gpu.MemoryTotal / count)
                gpu.Usage = twoDecimals(gpu.Usage / count)
                gpu.Power = twoDecimals(gpu.Power / count)
                gpu.Count = twoDecimals(gpu.Count / count)
                sum.GPUData[id] = gpu
            }
        }
    }

    if sum.GPUData != nil {
        stats.GPUData = make(map[string]system.GPUData, len(sum.GPUData))
        for id, value := range sum.GPUData {
            stats.GPUData[id] = system.GPUData{
                Name:        value.Name,
                Temperature: twoDecimals(value.Temperature / count),
                MemoryUsed:  twoDecimals(value.MemoryUsed / count),
                MemoryTotal: twoDecimals(value.MemoryTotal / count),
                Usage:       twoDecimals(value.Usage / count),
                Power:       twoDecimals(value.Power / count),
                Count:       twoDecimals(value.Count / count),
            }
        }
    }

    return stats
    return sum
}
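
twoDecimals is referenced throughout but defined elsewhere in the package; a plausible implementation, assuming simple rounding to two decimal places (hypothetical, shown only for context, using the math import above):

// twoDecimals rounds a float to two decimal places (assumed helper).
func twoDecimals(value float64) float64 {
    return math.Round(value*100) / 100
}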

// Calculate the average stats of a list of container_stats records
func (rm *RecordManager) AverageContainerStats(records RecordStats) []container.Stats {
    sums := make(map[string]*container.Stats)
    count := float64(len(records))

    var containerStats []container.Stats
    containerStats := make([]container.Stats, 0, 50)
    for i := range records {
        // Reset the slice length to 0, but keep the capacity
        // reset slice
        containerStats = containerStats[:0]
        if err := json.Unmarshal(records[i].Stats, &containerStats); err != nil {
            return []container.Stats{}
@@ -330,38 +335,45 @@ func (rm *RecordManager) AverageContainerStats(records RecordStats) []container.

// Deletes records older than what is displayed in the UI
func (rm *RecordManager) DeleteOldRecords() {
    // Define the collections to process
    collections := []string{"system_stats", "container_stats"}
    recordData := []RecordDeletionData{
        {
            recordType: "1m",
            retention:  time.Hour,
        },
        {
            recordType: "10m",
            retention:  12 * time.Hour,
        },
        {
            recordType: "20m",
            retention:  24 * time.Hour,
        },
        {
            recordType: "120m",
            retention:  7 * 24 * time.Hour,
        },
        {
            recordType: "480m",
            retention:  30 * 24 * time.Hour,
        },

    // Define record types and their retention periods
    type RecordDeletionData struct {
        recordType string
        retention  time.Duration
    }
    db := rm.app.NonconcurrentDB()
    for _, recordData := range recordData {
        for _, collectionSlug := range collections {
            formattedDate := time.Now().UTC().Add(-recordData.retention).Format(types.DefaultDateLayout)
            expr := dbx.NewExp("[[created]] < {:date} AND [[type]] = {:type}", dbx.Params{"date": formattedDate, "type": recordData.recordType})
            _, err := db.Delete(collectionSlug, expr).Execute()
            if err != nil {
                rm.app.Logger().Error("Failed to delete records", "err", err.Error())
            }
    recordData := []RecordDeletionData{
        {recordType: "1m", retention: time.Hour},             // 1 hour
        {recordType: "10m", retention: 12 * time.Hour},       // 12 hours
        {recordType: "20m", retention: 24 * time.Hour},       // 1 day
        {recordType: "120m", retention: 7 * 24 * time.Hour},  // 7 days
        {recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
    }

    // Process each collection
    for _, collection := range collections {
        // Build the WHERE clause dynamically
        var conditionParts []string
        var params dbx.Params = make(map[string]any)

        for i, rd := range recordData {
            // Create parameterized condition for this record type
            dateParam := fmt.Sprintf("date%d", i)
            conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
            params[dateParam] = time.Now().UTC().Add(-rd.retention)
        }

        // Combine conditions with OR
        conditionStr := strings.Join(conditionParts, " OR ")

        // Construct the full raw query
        rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)

        // Execute the query with parameters
        if _, err := rm.app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
            // return fmt.Errorf("failed to delete from %s: %v", collection, err)
            rm.app.Logger().Error("failed to delete", "collection", collection, "error", err)
        }
    }
}
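
For the retention table above, the loop builds a single DELETE per collection instead of one per record type. The statement generated for system_stats looks roughly like this (line breaks added for readability; in the actual string it is one line, and the date placeholders are bound as parameters at execution time):

// Roughly the query produced for the system_stats collection:
const exampleQuery = `DELETE FROM system_stats WHERE
    (type = '1m' AND created < {:date0}) OR
    (type = '10m' AND created < {:date1}) OR
    (type = '20m' AND created < {:date2}) OR
    (type = '120m' AND created < {:date3}) OR
    (type = '480m' AND created < {:date4})`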

494
beszel/internal/sysmanager/integration_test.go
Normal file
@@ -0,0 +1,494 @@
//go:build testing
// +build testing

package sysmanager_test

import (
    "beszel/internal/entities/container"
    "beszel/internal/entities/system"
    "beszel/internal/sysmanager"
    "beszel/internal/tests"
    "fmt"
    "sync"
    "testing"
    "time"

    "github.com/pocketbase/dbx"
    "github.com/pocketbase/pocketbase/core"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestSystemManagerIntegration(t *testing.T) {
    // Skip in short mode
    // if testing.Short() {
    //     t.Skip("Skipping integration test in short mode")
    // }

    // Create a test hub
    hub, err := tests.NewTestHub()
    if err != nil {
        t.Fatal(err)
    }
    defer hub.Cleanup()

    // Create a system manager
    sm, err := sysmanager.NewSystemManager(hub)
    assert.NoError(t, err)
    assert.NotNil(t, sm)

    // Test initialization
    sm.Initialize()

    // Test system count after initialization
    t.Run("SystemCount", func(t *testing.T) {
        // Get the count of systems in the store
        count := sm.GetSystemCount()

        // Query the database to get the expected count
        type countResult struct {
            Count int `db:"count"`
        }
        var result countResult
        err := hub.DB().NewQuery("SELECT COUNT(*) as count FROM systems WHERE status != 'paused'").One(&result)
        require.NoError(t, err)

        // The count in the store should match the count in the database
        assert.Equal(t, result.Count, count, "System count in store should match database count")
    })

    // Test PocketBase collection existence
    t.Run("CollectionExistence", func(t *testing.T) {
        // Verify that required collections exist
        systems, err := hub.FindCachedCollectionByNameOrId("systems")
        require.NoError(t, err)
        assert.NotNil(t, systems)

        systemStats, err := hub.FindCachedCollectionByNameOrId("system_stats")
        require.NoError(t, err)
        assert.NotNil(t, systemStats)

        containerStats, err := hub.FindCachedCollectionByNameOrId("container_stats")
        require.NoError(t, err)
        assert.NotNil(t, containerStats)
    })

    // Test adding a system record
    t.Run("AddRecord", func(t *testing.T) {
        var wg sync.WaitGroup
        wg.Add(2)

        // Get the count before adding the system
        countBefore := sm.GetSystemCount()

        // record should be pending on create
        hub.OnRecordCreate("systems").BindFunc(func(e *core.RecordEvent) error {
            record := e.Record
            if record.GetString("name") == "welcometoarcoampm" {
                assert.Equal(t, "pending", e.Record.GetString("status"), "System status should be 'pending'")
                wg.Done()
            }
            return e.Next()
        })

        // record should be down on update
        hub.OnRecordAfterUpdateSuccess("systems").BindFunc(func(e *core.RecordEvent) error {
            record := e.Record
            if record.GetString("name") == "welcometoarcoampm" {
                assert.Equal(t, "down", e.Record.GetString("status"), "System status should be 'down'")
                wg.Done()
            }
            return e.Next()
        })
        // Create a test system with the first user assigned
        record, err := createTestSystem(t, hub, map[string]any{
            "name": "welcometoarcoampm",
            "host": "localhost",
            "port": "33914",
        })
        require.NoError(t, err)

        wg.Wait()

        // system should be down if grabbed from the store
        assert.Equal(t, "down", sm.GetSystemStatusFromStore(record.Id), "System status should be 'down'")

        // Check that the system count increased
        countAfter := sm.GetSystemCount()
        assert.Equal(t, countBefore+1, countAfter, "System count should increase after adding a system via event hook")

        // Verify the system was added by checking if it exists
        assert.True(t, sm.HasSystem(record.Id), "System should exist in the store")

        // Verify the system host and port
        host, port := sm.GetSystemHostPort(record.Id)
        assert.Equal(t, record.Get("host"), host, "System host should match")
        assert.Equal(t, record.Get("port"), port, "System port should match")

        // Verify the system is in the list of all system IDs
        ids := sm.GetAllSystemIDs()
        assert.Contains(t, ids, record.Id, "System ID should be in the list of all system IDs")

        // Verify the system was added by checking if removing it works
        err = sm.RemoveSystem(record.Id)
        assert.NoError(t, err, "System should exist and be removable")
    })

    t.Run("RemoveSystem", func(t *testing.T) {
        // Get the count before adding the system
        countBefore := sm.GetSystemCount()

        // Create a test system record
        record, err := createTestSystem(t, hub, map[string]any{})
        require.NoError(t, err)

        // Disable the auto updater to prevent status changes during the test
        // sm.DisableAutoUpdater(record.Id)

        // Verify the system count increased
        countAfterAdd := sm.GetSystemCount()
        assert.Equal(t, countBefore+1, countAfterAdd, "System count should increase after adding a system via event hook")

        // Verify the system exists
        assert.True(t, sm.HasSystem(record.Id), "System should exist in the store")

        // Remove the system
        err = sm.RemoveSystem(record.Id)
        assert.NoError(t, err)

        // Check that the system count decreased
        countAfterRemove := sm.GetSystemCount()
        assert.Equal(t, countAfterAdd-1, countAfterRemove, "System count should decrease after removing a system")

        // Verify the system no longer exists
        assert.False(t, sm.HasSystem(record.Id), "System should not exist in the store after removal")

        // Verify the system is not in the list of all system IDs
        ids := sm.GetAllSystemIDs()
        assert.NotContains(t, ids, record.Id, "System ID should not be in the list of all system IDs after removal")

        // Verify the system status is empty
        status := sm.GetSystemStatusFromStore(record.Id)
        assert.Equal(t, "", status, "System status should be empty after removal")

        // Try to remove it again - should return an error since it's already removed
        err = sm.RemoveSystem(record.Id)
        assert.Error(t, err)
    })

    t.Run("FilterSystems", func(t *testing.T) {
        // Create a test system
        record, err := createTestSystem(t, hub, map[string]any{})
        require.NoError(t, err)

        // Add the record to the system manager
        err = sm.AddRecord(record)
        require.NoError(t, err)

        // Test filtering records by status - should be "pending" now
        filter := "status = 'pending'"
        pendingSystems, err := hub.FindRecordsByFilter("systems", filter, "-created", 0, 0, nil)
        require.NoError(t, err)
        assert.GreaterOrEqual(t, len(pendingSystems), 1)
    })

    t.Run("EventHooks", func(t *testing.T) {
        // Create a test system record
        record, err := createTestSystem(t, hub, map[string]interface{}{
            "name": "event-test.example.com",
            "host": "event-test.example.com",
        })
        require.NoError(t, err)

        // Update the record - this should trigger the onRecordUpdate hook
        record.Set("host", "event-test-updated.example.com")
        err = hub.Save(record)
        require.NoError(t, err)

        // Delete the record
        // Note: We can't directly call DeleteRecord as it's not exposed
        // In a real test, we would verify that the hooks were called
        // This would require mocking or instrumenting the hooks
    })

    t.Run("SystemStatusUpdate", func(t *testing.T) {
        // Create a test system record
        record, err := createTestSystem(t, hub, map[string]interface{}{
            "host": "status-test.example.com",
        })
        require.NoError(t, err)

        // Add the record to the system manager
        err = sm.AddRecord(record)
        require.NoError(t, err)

        // Disable auto updater to prevent interference
        sm.DisableAutoUpdater(record.Id)

        // Test status changes
        initialStatus := sm.GetSystemStatusFromStore(record.Id)

        // Set a new status
        sm.SetSystemStatusInDB(record.Id, "down")

        // Verify status was updated
        newStatus := sm.GetSystemStatusFromStore(record.Id)
        assert.Equal(t, "down", newStatus, "System status should be updated to 'down'")
        assert.NotEqual(t, initialStatus, newStatus, "Status should have changed")

        // Verify the database was updated
        updatedRecord, err := hub.FindRecordById("systems", record.Id)
        require.NoError(t, err)
        assert.Equal(t, "down", updatedRecord.Get("status"), "Database status should match")

        // We can't directly test setting the system down as it uses unexported methods
        // In a real test, we would use dependency injection or interfaces to mock this behavior
    })

    t.Run("HandleSystemData", func(t *testing.T) {
        // Create a test system record
        record, err := createTestSystem(t, hub, map[string]any{})
        require.NoError(t, err)

        // Create test system data
        testData := &system.CombinedData{
            Info: system.Info{
                Hostname:      "data-test.example.com",
                KernelVersion: "5.15.0-generic",
                Cores:         4,
                Threads:       8,
                CpuModel:      "Test CPU",
                Uptime:        3600,
                Cpu:           25.5,
                MemPct:        40.2,
                DiskPct:       60.0,
                Bandwidth:     100.0,
                AgentVersion:  "1.0.0",
            },
            Stats: system.Stats{
                Cpu:         25.5,
                Mem:         16384.0,
                MemUsed:     6553.6,
                MemPct:      40.0,
                DiskTotal:   1024000.0,
                DiskUsed:    614400.0,
                DiskPct:     60.0,
                NetworkSent: 1024.0,
                NetworkRecv: 2048.0,
            },
            Containers: []*container.Stats{},
        }

        // Test handling system data
        err = hub.HandleSystemAlerts(record, testData)
        assert.NoError(t, err)
    })

    t.Run("ErrorHandling", func(t *testing.T) {
        // Try to add a non-existent record
        nonExistentId := "non_existent_id"
        err := sm.RemoveSystem(nonExistentId)
        assert.Error(t, err)

        // Try to add a system with invalid host
        system := &sysmanager.System{
            Host: "",
        }
        err = sm.AddSystem(system)
        assert.Error(t, err)
    })

    t.Run("DeleteRecord", func(t *testing.T) {
        var wg sync.WaitGroup
        wg.Add(2)

        runs := 0

        hub.OnRecordUpdate("systems").BindFunc(func(e *core.RecordEvent) error {
            runs++
            record := e.Record
            if record.GetString("name") == "deadflagblues" {
                if runs == 1 {
                    assert.Equal(t, "up", e.Record.GetString("status"), "System status should be 'up'")
                    wg.Done()
                } else if runs == 2 {
                    assert.Equal(t, "paused", e.Record.GetString("status"), "System status should be 'paused'")
                    wg.Done()
                }
            }
            return e.Next()
        })

        // Create a test system record
        record, err := createTestSystem(t, hub, map[string]any{
            "name": "deadflagblues",
        })
        require.NoError(t, err)

        // stop the updater
        sm.DisableAutoUpdater(record.Id)

        // Verify the system exists
        assert.True(t, sm.HasSystem(record.Id), "System should exist in the store")

        // set the status manually to up
        sm.SetSystemStatusInDB(record.Id, "up")

        // verify the status is up
        assert.Equal(t, "up", sm.GetSystemStatusFromStore(record.Id), "System status should be 'up'")

        // Set the status to "paused" which should cause it to be deleted from the store
        sm.SetSystemStatusInDB(record.Id, "paused")

        wg.Wait()

        // Verify the system no longer exists
        assert.False(t, sm.HasSystem(record.Id), "System should not exist in the store after deletion")
    })

    t.Run("ConcurrentOperations", func(t *testing.T) {
        // Create a test system
        record, err := createTestSystem(t, hub, map[string]any{})
        require.NoError(t, err)

        // Disable auto updater
        sm.DisableAutoUpdater(record.Id)

        // Run concurrent operations
        const goroutines = 5
        var wg sync.WaitGroup
        wg.Add(goroutines)

        for i := range goroutines {
            go func(i int) {
                defer wg.Done()

                // Alternate between different operations
                switch i % 3 {
                case 0:
                    status := fmt.Sprintf("status-%d", i)
                    sm.SetSystemStatusInDB(record.Id, status)
                case 1:
                    _ = sm.GetSystemStatusFromStore(record.Id)
                case 2:
                    _, _ = sm.GetSystemHostPort(record.Id)
                }
            }(i)
        }

        wg.Wait()

        // Verify system still exists and is in a valid state
        assert.True(t, sm.HasSystem(record.Id), "System should still exist after concurrent operations")
        status := sm.GetSystemStatusFromStore(record.Id)
        assert.NotEmpty(t, status, "System should have a status after concurrent operations")
    })

    t.Run("ContextCancellation", func(t *testing.T) {
        // Create a test system record
        record, err := createTestSystem(t, hub, map[string]any{
            "name": "context-test-system",
            "host": "context-test.example.com",
        })
        require.NoError(t, err)

        // Verify the system exists in the store
        assert.True(t, sm.HasSystem(record.Id), "System should exist in the store")

        // Store the original context and cancel function
        originalCtx, originalCancel, err := sm.GetSystemContextFromStore(record.Id)
        assert.NoError(t, err)

        // Ensure the context is not nil
        assert.NotNil(t, originalCtx, "System context should not be nil")
        assert.NotNil(t, originalCancel, "System cancel function should not be nil")

        // Cancel the context
        originalCancel()

        // Wait a short time for cancellation to propagate
        time.Sleep(100 * time.Millisecond)

        // Verify the context is done
        select {
        case <-originalCtx.Done():
            // Context was properly cancelled
        default:
            t.Fatal("Context was not cancelled")
        }

        // Verify the system is still in the store (cancellation shouldn't remove it)
        assert.True(t, sm.HasSystem(record.Id), "System should still exist after context cancellation")

        // Explicitly remove the system
        err = sm.RemoveSystem(record.Id)
        assert.NoError(t, err, "RemoveSystem should succeed")

        // Verify the system is removed
        assert.False(t, sm.HasSystem(record.Id), "System should be removed after RemoveSystem")

        // Try to remove it again - should return an error
        err = sm.RemoveSystem(record.Id)
        assert.Error(t, err, "RemoveSystem should fail for non-existent system")

        // Add the system back
        err = sm.AddRecord(record)
        require.NoError(t, err, "AddRecord should succeed")

        // Verify the system is back in the store
        assert.True(t, sm.HasSystem(record.Id), "System should exist after re-adding")

        // Verify a new context was created
        newCtx, newCancel, err := sm.GetSystemContextFromStore(record.Id)
        assert.NoError(t, err)
        assert.NotNil(t, newCtx, "New system context should not be nil")
        assert.NotNil(t, newCancel, "New system cancel function should not be nil")
        assert.NotEqual(t, originalCtx, newCtx, "New context should be different from original")

        // Clean up
        err = sm.RemoveSystem(record.Id)
        assert.NoError(t, err)
    })
}
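
Note that this file (and the helpers file later in this diff) is guarded by the testing build tag, so a plain `go test ./...` will not compile it; the tag has to be passed explicitly, for example `go test -tags=testing ./internal/sysmanager/...` (path assumed).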

// createTestSystem creates a test system record with a unique host name
// and returns the created record and any error
func createTestSystem(t *testing.T, hub *tests.TestHub, options map[string]any) (*core.Record, error) {
    collection, err := hub.FindCachedCollectionByNameOrId("systems")
    if err != nil {
        return nil, err
    }

    // get user record
    var firstUser *core.Record
    users, err := hub.FindAllRecords("users", dbx.NewExp("id != ''"))
    if err != nil {
        t.Fatal(err)
    }
    if len(users) > 0 {
        firstUser = users[0]
    }
    // Generate a unique host name to ensure we're adding a new system
    uniqueHost := fmt.Sprintf("test-host-%d.example.com", time.Now().UnixNano())

    // Create the record
    record := core.NewRecord(collection)
    record.Set("name", uniqueHost)
    record.Set("host", uniqueHost)
    record.Set("port", "45876")
    record.Set("status", "pending")
    record.Set("users", []string{firstUser.Id})

    // Apply any custom options
    for key, value := range options {
        record.Set(key, value)
    }

    // Save the record to the database
    err = hub.Save(record)
    if err != nil {
        return nil, err
    }

    return record, nil
}
426
beszel/internal/sysmanager/sysmanager.go
Normal file
@@ -0,0 +1,426 @@
package sysmanager

import (
    "beszel/internal/entities/system"
    "context"
    "fmt"
    "net"
    "strings"
    "time"

    "github.com/goccy/go-json"
    "github.com/pocketbase/pocketbase/core"
    "github.com/pocketbase/pocketbase/tools/store"
    "golang.org/x/crypto/ssh"
)

const (
    up      string = "up"
    down    string = "down"
    paused  string = "paused"
    pending string = "pending"

    interval int = 60_000

    sessionTimeout = 4 * time.Second
)

type SystemManager struct {
    hub       hubLike
    systems   *store.Store[string, *System]
    sshConfig *ssh.ClientConfig
}

type System struct {
    Id      string `db:"id"`
    Host    string `db:"host"`
    Port    string `db:"port"`
    Status  string `db:"status"`
    manager *SystemManager
    client  *ssh.Client
    data    *system.CombinedData
    ctx     context.Context
    cancel  context.CancelFunc
}

type hubLike interface {
    core.App
    GetSSHKey() ([]byte, error)
    HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error
    HandleStatusAlerts(status string, systemRecord *core.Record) error
}

func NewSystemManager(hub hubLike) (*SystemManager, error) {
    sm := &SystemManager{
        systems: store.New(map[string]*System{}),
        hub:     hub,
    }
    key, err := sm.hub.GetSSHKey()
    if err != nil {
        return nil, err
    }
    if err := sm.createSSHClientConfig(key); err != nil {
        return nil, err
    }
    return sm, nil
}

func (sm *SystemManager) Initialize() {
    sm.createEventHooks()
    var systems []*System
    err := sm.hub.DB().NewQuery("SELECT id, host, port, status FROM systems WHERE status != 'paused'").All(&systems)
    if err != nil || len(systems) == 0 {
        return
    }
    // start updating existing systems
    go func() {
        // time between initial system updates
        delta := interval / max(1, len(systems))
        delta = min(delta, 2_000)
        sleepTime := time.Duration(delta) * time.Millisecond
        for _, system := range systems {
            time.Sleep(sleepTime)
            _ = sm.AddSystem(system)
        }
    }()
}
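
Since interval is 60_000 ms, the startup goroutine spaces the initial dials at interval/len(systems) ms apart, capped at 2 seconds, so a small fleet doesn't wait a full minute and a huge fleet doesn't stampede. A quick sketch of the resulting spacing (fleet sizes are illustrative only):

package main

import "fmt"

func main() {
    const intervalMs = 60_000
    for _, n := range []int{5, 30, 500} {
        delta := intervalMs / max(1, n)
        delta = min(delta, 2_000) // never wait more than 2s between dials
        fmt.Printf("%d systems -> one dial every %dms\n", n, delta)
    }
}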

func (sm *SystemManager) createEventHooks() {
    sm.hub.OnRecordCreate("systems").BindFunc(sm.onRecordCreate)
    sm.hub.OnRecordAfterCreateSuccess("systems").BindFunc(sm.onRecordAfterCreateSuccess)
    sm.hub.OnRecordUpdate("systems").BindFunc(sm.onRecordUpdate)
    sm.hub.OnRecordAfterUpdateSuccess("systems").BindFunc(sm.onRecordAfterUpdateSuccess)
    sm.hub.OnRecordAfterDeleteSuccess("systems").BindFunc(sm.onRecordAfterDeleteSuccess)
}

// Runs before the record is committed to the database
func (sm *SystemManager) onRecordCreate(e *core.RecordEvent) error {
    e.Record.Set("info", system.Info{})
    e.Record.Set("status", pending)
    return e.Next()
}

// Runs after the record is committed to the database
func (sm *SystemManager) onRecordAfterCreateSuccess(e *core.RecordEvent) error {
    if err := sm.AddRecord(e.Record); err != nil {
        sm.hub.Logger().Error("Error adding record", "err", err)
    }
    return e.Next()
}

// Runs before the record is updated
func (sm *SystemManager) onRecordUpdate(e *core.RecordEvent) error {
    if e.Record.GetString("status") == paused {
        e.Record.Set("info", system.Info{})
    }
    return e.Next()
}

// Runs after the record is updated
func (sm *SystemManager) onRecordAfterUpdateSuccess(e *core.RecordEvent) error {
    newStatus := e.Record.GetString("status")
    switch newStatus {
    case paused:
        sm.RemoveSystem(e.Record.Id)
        return e.Next()
    case pending:
        if err := sm.AddRecord(e.Record); err != nil {
            sm.hub.Logger().Error("Error adding record", "err", err)
        }
        return e.Next()
    }
    system, ok := sm.systems.GetOk(e.Record.Id)
    if !ok {
        return sm.AddRecord(e.Record)
    }
    prevStatus := system.Status
    system.Status = newStatus
    // system alerts if system is up
    if system.Status == up {
        if err := sm.hub.HandleSystemAlerts(e.Record, system.data); err != nil {
            sm.hub.Logger().Error("Error handling system alerts", "err", err)
        }
    }
    if (system.Status == down && prevStatus == up) || (system.Status == up && prevStatus == down) {
        if err := sm.hub.HandleStatusAlerts(system.Status, e.Record); err != nil {
            sm.hub.Logger().Error("Error handling status alerts", "err", err)
        }
    }
    return e.Next()
}

// Runs after the record is deleted
func (sm *SystemManager) onRecordAfterDeleteSuccess(e *core.RecordEvent) error {
    sm.RemoveSystem(e.Record.Id)
    return e.Next()
}

func (sm *SystemManager) createSSHClientConfig(key []byte) error {
    signer, err := ssh.ParsePrivateKey(key)
    if err != nil {
        return err
    }
    sm.sshConfig = &ssh.ClientConfig{
        User: "u",
        Auth: []ssh.AuthMethod{
            ssh.PublicKeys(signer),
        },
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
        Timeout:         sessionTimeout,
    }
    return nil
}

// AddSystem adds a system to the manager
func (sm *SystemManager) AddSystem(sys *System) error {
    if sm.systems.Has(sys.Id) {
        return fmt.Errorf("system exists")
    }
    if sys.Id == "" || sys.Host == "" {
        return fmt.Errorf("system is missing required fields")
    }
    sys.manager = sm
    sys.ctx, sys.cancel = context.WithCancel(context.Background())
    sys.data = &system.CombinedData{}
    sm.systems.Set(sys.Id, sys)
    go sys.StartUpdater()
    return nil
}

// RemoveSystem removes a system from the manager
func (sm *SystemManager) RemoveSystem(systemID string) error {
    system, ok := sm.systems.GetOk(systemID)
    if !ok {
        return fmt.Errorf("system not found")
    }
    // cancel the context to signal stop
    if system.cancel != nil {
        system.cancel()
    }

    if system.client != nil {
        system.client.Close()
    }
    sm.systems.Remove(systemID)
    return nil
}

// CreateSSHClient creates a new SSH client for the system
func (s *System) CreateSSHClient() error {
    network := "tcp"
    host := s.Host
    if strings.HasPrefix(host, "/") {
        network = "unix"
    } else {
        host = net.JoinHostPort(host, s.Port)
    }
    var err error
    s.client, err = ssh.Dial(network, host, s.manager.sshConfig)
    if err != nil {
        return err
    }
    return nil
}

// AddRecord adds a system record to the manager.
// It first removes any existing system with the same ID, then creates a new System
// instance from the record data and adds it to the manager.
// This function is typically called when a new system is created or when an existing
// system's status changes to pending.
func (sm *SystemManager) AddRecord(record *core.Record) (err error) {
    _ = sm.RemoveSystem(record.Id)
    system := &System{
        Id:     record.Id,
        Status: record.GetString("status"),
        Host:   record.GetString("host"),
        Port:   record.GetString("port"),
    }
    return sm.AddSystem(system)
}

// StartUpdater starts the system updater.
// It first fetches the data from the agent then updates the records.
// If the data is not found or the system is down, it sets the system down.
func (sys *System) StartUpdater() {
    if sys.data == nil {
        sys.data = &system.CombinedData{}
    }
    if err := sys.update(); err != nil {
        _ = sys.setDown(err)
    }

    c := time.Tick(time.Duration(interval) * time.Millisecond)

    for {
        select {
        case <-sys.ctx.Done():
            return
        case <-c:
            err := sys.update()
            if err != nil {
                _ = sys.setDown(err)
            }
        }
    }
}
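
Each System gets its own goroutine driving this loop, and RemoveSystem stops it by cancelling sys.ctx. A compact, standalone sketch of the same stop mechanism (hypothetical worker, short durations for demonstration):

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    done := make(chan struct{})
    go func() {
        defer close(done)
        tick := time.Tick(50 * time.Millisecond)
        for {
            select {
            case <-ctx.Done():
                return // RemoveSystem's cancel() lands here
            case <-tick:
                fmt.Println("poll agent")
            }
        }
    }()
    time.Sleep(120 * time.Millisecond)
    cancel() // stop the updater
    <-done
}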
|
||||
|
||||
// update updates the system data and records.
|
||||
// It first fetches the data from the agent then updates the records.
|
||||
func (sys *System) update() error {
|
||||
_, err := sys.fetchDataFromAgent()
|
||||
if err == nil {
|
||||
_, err = sys.updateRecords()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// fetchDataFromAgent fetches the data from the agent.
|
||||
// It first creates a new SSH client if it doesn't exist or the system is down.
|
||||
// Then it creates a new SSH session and fetches the data from the agent.
|
||||
// If the data is not found or the system is down, it sets the system down.
|
||||
func (sys *System) fetchDataFromAgent() (*system.CombinedData, error) {
|
||||
maxRetries := 1
|
||||
for attempt := 0; attempt <= maxRetries; attempt++ {
|
||||
if sys.client == nil || sys.Status == down {
|
||||
if err := sys.CreateSSHClient(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
session, err := sys.createSessionWithTimeout(4 * time.Second)
|
||||
if err != nil {
|
||||
if attempt >= maxRetries {
|
||||
return nil, err
|
||||
}
|
||||
sys.manager.hub.Logger().Warn("Session closed. Retrying...", "host", sys.Host, "port", sys.Port, "err", err)
|
||||
sys.client = nil
|
||||
continue
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
stdout, err := session.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := session.Shell(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// this is initialized in startUpdater, should never be nil
|
||||
*sys.data = system.CombinedData{}
|
||||
if err := json.NewDecoder(stdout).Decode(sys.data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// wait for the session to complete
|
||||
if err := session.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sys.data, nil
|
||||
}
|
||||
|
||||
// this should never be reached due to the return in the loop
|
||||
return nil, fmt.Errorf("failed to fetch data")
|
||||
}
|
||||
|
||||
// updateRecords updates the system record and adds system_stats and container_stats records
|
||||
func (sys *System) updateRecords() (*core.Record, error) {
|
||||
systemRecord, err := sys.getRecord()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hub := sys.manager.hub
|
||||
systemRecord.Set("status", up)
|
||||
systemRecord.Set("info", sys.data.Info)
|
||||
if err := hub.SaveNoValidate(systemRecord); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// add system_stats and container_stats records
|
||||
systemStats, err := hub.FindCachedCollectionByNameOrId("system_stats")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
systemStatsRecord := core.NewRecord(systemStats)
|
||||
systemStatsRecord.Set("system", systemRecord.Id)
|
||||
systemStatsRecord.Set("stats", sys.data.Stats)
|
||||
systemStatsRecord.Set("type", "1m")
|
||||
if err := hub.SaveNoValidate(systemStatsRecord); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// add new container_stats record
|
||||
if len(sys.data.Containers) > 0 {
|
||||
containerStats, err := hub.FindCachedCollectionByNameOrId("container_stats")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
containerStatsRecord := core.NewRecord(containerStats)
|
||||
containerStatsRecord.Set("system", systemRecord.Id)
|
||||
containerStatsRecord.Set("stats", sys.data.Containers)
|
||||
containerStatsRecord.Set("type", "1m")
|
||||
if err := hub.SaveNoValidate(containerStatsRecord); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return systemRecord, nil
|
||||
}
|
||||
// getRecord retrieves the system record from the database.
// If the record is not found or the system is paused, it removes the system from the manager.
func (sys *System) getRecord() (*core.Record, error) {
	record, err := sys.manager.hub.FindRecordById("systems", sys.Id)
	if err != nil || record == nil {
		_ = sys.manager.RemoveSystem(sys.Id)
		return nil, err
	}
	return record, nil
}
// setDown marks a system as down in the database.
// It takes the original error that caused the system to go down and returns any error
// encountered while updating the system status.
func (sys *System) setDown(originalError error) error {
	if sys.Status == down {
		return nil
	}
	record, err := sys.getRecord()
	if err != nil {
		return err
	}
	sys.manager.hub.Logger().Error("System down", "system", record.GetString("name"), "err", originalError)
	record.Set("status", down)
	return sys.manager.hub.SaveNoValidate(record)
}
// createSessionWithTimeout creates a new SSH session with a timeout to avoid hanging
// in case of network issues.
func (sys *System) createSessionWithTimeout(timeout time.Duration) (*ssh.Session, error) {
	if sys.client == nil {
		return nil, fmt.Errorf("client not initialized")
	}

	ctx, cancel := context.WithTimeout(sys.ctx, timeout)
	defer cancel()

	// buffered channels let the goroutine finish even if the timeout fires first
	sessionChan := make(chan *ssh.Session, 1)
	errChan := make(chan error, 1)

	go func() {
		if session, err := sys.client.NewSession(); err != nil {
			errChan <- err
		} else {
			sessionChan <- session
		}
	}()

	select {
	case session := <-sessionChan:
		return session, nil
	case err := <-errChan:
		return nil, err
	case <-ctx.Done():
		return nil, fmt.Errorf("timed out after %v creating SSH session", timeout)
	}
}
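The goroutine-plus-select shape above is the standard way to bolt a context deadline onto a blocking API that doesn't accept one (ssh.Client.NewSession here). A generic, self-contained sketch of the pattern — callWithTimeout and slowDial are illustrative names, not part of the codebase:

package main

import (
	"context"
	"fmt"
	"time"
)

// callWithTimeout runs a blocking function in a goroutine and abandons it
// if the context expires first. The buffered channel lets the goroutine
// complete and be garbage-collected instead of leaking.
func callWithTimeout[T any](ctx context.Context, timeout time.Duration, fn func() (T, error)) (T, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	type result struct {
		val T
		err error
	}
	ch := make(chan result, 1)
	go func() {
		v, err := fn()
		ch <- result{v, err}
	}()

	select {
	case r := <-ch:
		return r.val, r.err
	case <-ctx.Done():
		var zero T
		return zero, ctx.Err()
	}
}

func main() {
	slowDial := func() (string, error) {
		time.Sleep(200 * time.Millisecond)
		return "session", nil
	}
	_, err := callWithTimeout(context.Background(), 50*time.Millisecond, slowDial)
	fmt.Println(err) // context deadline exceeded
}

One trade-off carried over from the original: if the blocking call succeeds after the deadline, its result is abandoned rather than closed — fine for short-lived SSH sessions, but worth noting for heavier resources.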
117 beszel/internal/sysmanager/sysmanager_test_helpers.go Normal file
@@ -0,0 +1,117 @@
//go:build testing
// +build testing

package sysmanager

import (
	"beszel/internal/entities/system"
	"context"
	"fmt"
)

// GetSystemCount returns the number of systems in the store
func (sm *SystemManager) GetSystemCount() int {
	return sm.systems.Length()
}

// HasSystem checks if a system with the given ID exists in the store
func (sm *SystemManager) HasSystem(systemID string) bool {
	return sm.systems.Has(systemID)
}

// GetSystemStatusFromStore returns the status of a system with the given ID.
// Returns an empty string if the system doesn't exist.
func (sm *SystemManager) GetSystemStatusFromStore(systemID string) string {
	sys, ok := sm.systems.GetOk(systemID)
	if !ok {
		return ""
	}
	return sys.Status
}

// GetSystemContextFromStore returns the context and cancel function for a system
func (sm *SystemManager) GetSystemContextFromStore(systemID string) (context.Context, context.CancelFunc, error) {
	sys, ok := sm.systems.GetOk(systemID)
	if !ok {
		return nil, nil, fmt.Errorf("no system")
	}
	return sys.ctx, sys.cancel, nil
}

// GetSystemFromStore returns a system from the store
func (sm *SystemManager) GetSystemFromStore(systemID string) (*System, error) {
	sys, ok := sm.systems.GetOk(systemID)
	if !ok {
		return nil, fmt.Errorf("no system")
	}
	return sys, nil
}

// GetAllSystemIDs returns a slice of all system IDs in the store
func (sm *SystemManager) GetAllSystemIDs() []string {
	data := sm.systems.GetAll()
	ids := make([]string, 0, len(data))
	for id := range data {
		ids = append(ids, id)
	}
	return ids
}

// GetSystemData returns the combined data for a system with the given ID.
// Returns nil if the system doesn't exist.
// This method is primarily intended for testing.
func (sm *SystemManager) GetSystemData(systemID string) *system.CombinedData {
	sys, ok := sm.systems.GetOk(systemID)
	if !ok {
		return nil
	}
	return sys.data
}

// GetSystemHostPort returns the host and port for a system with the given ID.
// Returns empty strings if the system doesn't exist.
func (sm *SystemManager) GetSystemHostPort(systemID string) (string, string) {
	sys, ok := sm.systems.GetOk(systemID)
	if !ok {
		return "", ""
	}
	return sys.Host, sys.Port
}

// DisableAutoUpdater disables the automatic updater for a system.
// This is primarily intended for testing.
// Returns false if the system doesn't exist.
func (sm *SystemManager) DisableAutoUpdater(systemID string) bool {
	sys, ok := sm.systems.GetOk(systemID)
	if !ok {
		return false
	}
	if sys.cancel != nil {
		sys.cancel()
		sys.cancel = nil
	}
	return true
}

// SetSystemStatusInDB sets the status of a system directly and updates the database record.
// This is primarily intended for testing.
// Returns false if the system doesn't exist.
func (sm *SystemManager) SetSystemStatusInDB(systemID string, status string) bool {
	if !sm.HasSystem(systemID) {
		return false
	}

	// Update the database record
	record, err := sm.hub.FindRecordById("systems", systemID)
	if err != nil {
		return false
	}

	record.Set("status", status)
	if err := sm.hub.Save(record); err != nil {
		return false
	}

	return true
}
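Taken together, these helpers expose the manager's internal store for assertions without widening the production API (they only compile under the testing build tag). A sketch of how a test might lean on them — how the manager is constructed and seeded is assumed, not shown:

//go:build testing

package sysmanager

import "testing"

// checkStore exercises the test helpers against an already-running manager.
// Sketch only: wiring sm to a test hub and seeding systems is assumed.
func checkStore(t *testing.T, sm *SystemManager) {
	t.Helper()
	for _, id := range sm.GetAllSystemIDs() {
		if !sm.HasSystem(id) {
			t.Errorf("store reports missing system %q", id)
		}
		host, port := sm.GetSystemHostPort(id)
		t.Logf("%s -> %s:%s status=%s", id, host, port, sm.GetSystemStatusFromStore(id))
	}
	if sm.GetSystemCount() != len(sm.GetAllSystemIDs()) {
		t.Error("count and ID list disagree")
	}
}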
58 beszel/internal/tests/hub.go Normal file
@@ -0,0 +1,58 @@
// Package tests provides common helpers and mocks used in PocketBase application tests.
package tests

import (
	"beszel/internal/hub"

	"github.com/pocketbase/pocketbase/core"
	"github.com/pocketbase/pocketbase/tests"

	_ "github.com/pocketbase/pocketbase/migrations"
)

// TestHub is a wrapper hub instance used for testing.
type TestHub struct {
	*tests.TestApp
	core.App
	*hub.Hub
}

// NewTestHub creates and initializes a test application instance.
//
// It is the caller's responsibility to call app.Cleanup() when the app is no longer needed.
func NewTestHub(optTestDataDir ...string) (*TestHub, error) {
	var testDataDir string
	if len(optTestDataDir) > 0 {
		testDataDir = optTestDataDir[0]
	}

	return NewTestHubWithConfig(core.BaseAppConfig{
		DataDir:       testDataDir,
		EncryptionEnv: "pb_test_env",
	})
}

// NewTestHubWithConfig creates and initializes a test application instance
// from the provided config.
//
// If config.DataDir is not set, it falls back to the default internal test data directory.
//
// config.DataDir is cloned for each new test application instance.
//
// It is the caller's responsibility to call app.Cleanup() when the app is no longer needed.
func NewTestHubWithConfig(config core.BaseAppConfig) (*TestHub, error) {
	testApp, err := tests.NewTestAppWithConfig(config)
	if err != nil {
		return nil, err
	}

	hub := hub.NewHub(testApp)

	t := &TestHub{
		TestApp: testApp,
		Hub:     hub,
		App:     testApp,
	}

	return t, nil
}
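A typical call site, sketched under the assumption that the default internal test data directory contains the collections the hub expects:

package tests_test

import (
	"testing"

	beszeltests "beszel/internal/tests"
)

func TestHubBoots(t *testing.T) {
	// No data dir passed: falls back to the default internal test data directory.
	th, err := beszeltests.NewTestHub()
	if err != nil {
		t.Fatal(err)
	}
	// Cleanup removes the cloned data directory (promoted from *tests.TestApp).
	defer th.Cleanup()

	// Both the PocketBase test app and the beszel hub are reachable
	// through the embedded fields.
	if th.Hub == nil || th.TestApp == nil {
		t.Fatal("embedded fields not initialized")
	}
}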
@@ -1,98 +0,0 @@
package migrations

import (
	"encoding/json"

	"github.com/pocketbase/pocketbase/core"
	m "github.com/pocketbase/pocketbase/migrations"
)

func init() {
	m.Register(func(app core.App) error {
		collection, err := app.FindCollectionByNameOrId("_pb_users_auth_")
		if err != nil {
			return err
		}

		// update collection data
		if err := json.Unmarshal([]byte(`{
			"indexes": [
				"CREATE UNIQUE INDEX ` + "`" + `__pb_users_auth__email_idx` + "`" + ` ON ` + "`" + `users` + "`" + ` (` + "`" + `email` + "`" + `) WHERE ` + "`" + `email` + "`" + ` != ''",
				"CREATE UNIQUE INDEX ` + "`" + `__pb_users_auth__tokenKey_idx` + "`" + ` ON ` + "`" + `users` + "`" + ` (` + "`" + `tokenKey` + "`" + `)"
			]
		}`), &collection); err != nil {
			return err
		}

		// remove field
		collection.Fields.RemoveById("text4166911607")

		// update field
		if err := collection.Fields.AddMarshaledJSONAt(3, []byte(`{
			"exceptDomains": null,
			"hidden": false,
			"id": "email3885137012",
			"name": "email",
			"onlyDomains": null,
			"presentable": false,
			"required": true,
			"system": true,
			"type": "email"
		}`)); err != nil {
			return err
		}

		return app.Save(collection)
	}, func(app core.App) error {
		collection, err := app.FindCollectionByNameOrId("_pb_users_auth_")
		if err != nil {
			return err
		}

		// update collection data
		if err := json.Unmarshal([]byte(`{
			"indexes": [
				"CREATE UNIQUE INDEX ` + "`" + `__pb_users_auth__username_idx` + "`" + ` ON ` + "`" + `users` + "`" + ` (username COLLATE NOCASE)",
				"CREATE UNIQUE INDEX ` + "`" + `__pb_users_auth__email_idx` + "`" + ` ON ` + "`" + `users` + "`" + ` (` + "`" + `email` + "`" + `) WHERE ` + "`" + `email` + "`" + ` != ''",
				"CREATE UNIQUE INDEX ` + "`" + `__pb_users_auth__tokenKey_idx` + "`" + ` ON ` + "`" + `users` + "`" + ` (` + "`" + `tokenKey` + "`" + `)"
			]
		}`), &collection); err != nil {
			return err
		}

		// add field
		if err := collection.Fields.AddMarshaledJSONAt(6, []byte(`{
			"autogeneratePattern": "users[0-9]{6}",
			"hidden": false,
			"id": "text4166911607",
			"max": 150,
			"min": 3,
			"name": "username",
			"pattern": "^[\\w][\\w\\.\\-]*$",
			"presentable": false,
			"primaryKey": false,
			"required": true,
			"system": false,
			"type": "text"
		}`)); err != nil {
			return err
		}

		// update field
		if err := collection.Fields.AddMarshaledJSONAt(3, []byte(`{
			"exceptDomains": null,
			"hidden": false,
			"id": "email3885137012",
			"name": "email",
			"onlyDomains": null,
			"presentable": false,
			"required": false,
			"system": true,
			"type": "email"
		}`)); err != nil {
			return err
		}

		return app.Save(collection)
	})
}
Binary file not shown.
@@ -31,34 +31,34 @@
    "@radix-ui/react-tabs": "^1.1.3",
    "@radix-ui/react-toast": "^1.2.6",
    "@radix-ui/react-tooltip": "^1.1.8",
    "@tanstack/react-table": "^8.20.6",
    "@tanstack/react-table": "^8.21.2",
    "class-variance-authority": "^0.7.1",
    "clsx": "^2.1.1",
    "cmdk": "^1.0.4",
    "d3-time": "^3.1.0",
    "lucide-react": "^0.452.0",
    "nanostores": "^0.11.3",
    "pocketbase": "^0.25.1",
    "react": "^18.3.1",
    "react-dom": "^18.3.1",
    "nanostores": "^0.11.4",
    "pocketbase": "^0.25.2",
    "react": "^19.0.0",
    "react-dom": "^19.0.0",
    "recharts": "^2.15.1",
    "tailwind-merge": "^2.6.0",
    "tailwindcss-animate": "^1.0.7",
    "valibot": "^0.36.0"
  },
  "devDependencies": {
    "@lingui/cli": "^4.14.1",
    "@lingui/swc-plugin": "^4.1.0",
    "@lingui/vite-plugin": "^4.14.1",
    "@types/bun": "^1.2.2",
    "@types/react": "^18.3.18",
    "@types/react-dom": "^18.3.5",
    "@vitejs/plugin-react-swc": "^3.7.2",
    "@lingui/cli": "^5.2.0",
    "@lingui/swc-plugin": "^5.4.0",
    "@lingui/vite-plugin": "^5.2.0",
    "@types/bun": "^1.2.4",
    "@types/react": "^19.0.0",
    "@types/react-dom": "^19.0.0",
    "@vitejs/plugin-react-swc": "^3.8.0",
    "autoprefixer": "^10.4.20",
    "postcss": "^8.5.1",
    "postcss": "^8.5.3",
    "tailwindcss": "^3.4.17",
    "tailwindcss-rtl": "^0.9.0",
    "typescript": "^5.7.3",
    "typescript": "^5.8.2",
    "vite": "^5.4.14"
  },
  "overrides": {
@@ -142,6 +142,12 @@ export default function SystemsTable() {
				minSize: 0,
				accessorKey: "name",
				id: t`System`,
				filterFn: (row, _, filterVal) => {
					// allow filtering by name or status via input field
					const { name, status } = row.original
					filterVal = filterVal.toLowerCase()
					return name.toLowerCase().includes(filterVal) || t`${status}`.toLowerCase().includes(filterVal)
				},
				enableHiding: false,
				icon: ServerIcon,
				cell: (info) => (
@@ -601,10 +607,12 @@ const ActionsButton = memo(({ system }: { system: SystemRecord }) => {
					)}
					<DropdownMenuItem
						className={cn(isReadOnlyUser() && "hidden")}
						onClick={() => {
							pb.collection("systems").update(id, {
						onClick={async () => {
							// toggle between paused and pending; the hub reacts to the status change
							await pb.collection("systems").update(id, {
								status: status === "paused" ? "pending" : "paused",
							})
						}}
					>
						{status === "paused" ? (